id: int32 (0 to 252k)
repo: stringlengths (7 to 55)
path: stringlengths (4 to 127)
func_name: stringlengths (1 to 88)
original_string: stringlengths (75 to 19.8k)
language: stringclasses (1 value)
code: stringlengths (75 to 19.8k)
code_tokens: sequence
docstring: stringlengths (3 to 17.3k)
docstring_tokens: sequence
sha: stringlengths (40 to 40)
url: stringlengths (87 to 242)
2,900
hotdoc/hotdoc
hotdoc/utils/loggable.py
Logger.info
def info(message, domain): """Log simple info""" if domain in Logger._ignored_domains: return Logger._log(None, message, INFO, domain)
python
def info(message, domain): """Log simple info""" if domain in Logger._ignored_domains: return Logger._log(None, message, INFO, domain)
[ "def", "info", "(", "message", ",", "domain", ")", ":", "if", "domain", "in", "Logger", ".", "_ignored_domains", ":", "return", "Logger", ".", "_log", "(", "None", ",", "message", ",", "INFO", ",", "domain", ")" ]
Log simple info
[ "Log", "simple", "info" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L270-L275
2,901
hotdoc/hotdoc
hotdoc/utils/loggable.py
Logger.get_issues
def get_issues(): """Get actual issues in the journal.""" issues = [] for entry in Logger.journal: if entry.level >= WARNING: issues.append(entry) return issues
python
def get_issues(): """Get actual issues in the journal.""" issues = [] for entry in Logger.journal: if entry.level >= WARNING: issues.append(entry) return issues
[ "def", "get_issues", "(", ")", ":", "issues", "=", "[", "]", "for", "entry", "in", "Logger", ".", "journal", ":", "if", "entry", ".", "level", ">=", "WARNING", ":", "issues", ".", "append", "(", "entry", ")", "return", "issues" ]
Get actual issues in the journal.
[ "Get", "actual", "issues", "in", "the", "journal", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L298-L304
2,902
hotdoc/hotdoc
hotdoc/utils/loggable.py
Logger.reset
def reset(): """Resets Logger to its initial state""" Logger.journal = [] Logger.fatal_warnings = False Logger._ignored_codes = set() Logger._ignored_domains = set() Logger._verbosity = 2 Logger._last_checkpoint = 0
python
def reset(): """Resets Logger to its initial state""" Logger.journal = [] Logger.fatal_warnings = False Logger._ignored_codes = set() Logger._ignored_domains = set() Logger._verbosity = 2 Logger._last_checkpoint = 0
[ "def", "reset", "(", ")", ":", "Logger", ".", "journal", "=", "[", "]", "Logger", ".", "fatal_warnings", "=", "False", "Logger", ".", "_ignored_codes", "=", "set", "(", ")", "Logger", ".", "_ignored_domains", "=", "set", "(", ")", "Logger", ".", "_verbosity", "=", "2", "Logger", ".", "_last_checkpoint", "=", "0" ]
Resets Logger to its initial state
[ "Resets", "Logger", "to", "its", "initial", "state" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L307-L314
2,903
hotdoc/hotdoc
hotdoc/parsers/sitemap.py
Sitemap.walk
def walk(self, action, user_data=None): """ Walk the hierarchy, applying action to each filename. Args: action: callable, the callable to invoke for each filename, will be invoked with the filename, the subfiles, and the level in the sitemap. """ action(self.index_file, self.__root, 0, user_data) self.__do_walk(self.__root, 1, action, user_data)
python
def walk(self, action, user_data=None): """ Walk the hierarchy, applying action to each filename. Args: action: callable, the callable to invoke for each filename, will be invoked with the filename, the subfiles, and the level in the sitemap. """ action(self.index_file, self.__root, 0, user_data) self.__do_walk(self.__root, 1, action, user_data)
[ "def", "walk", "(", "self", ",", "action", ",", "user_data", "=", "None", ")", ":", "action", "(", "self", ".", "index_file", ",", "self", ".", "__root", ",", "0", ",", "user_data", ")", "self", ".", "__do_walk", "(", "self", ".", "__root", ",", "1", ",", "action", ",", "user_data", ")" ]
Walk the hierarchy, applying action to each filename. Args: action: callable, the callable to invoke for each filename, will be invoked with the filename, the subfiles, and the level in the sitemap.
[ "Walk", "the", "hierarchy", "applying", "action", "to", "each", "filename", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/sitemap.py#L77-L87
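A hedged usage sketch for the walk() method in the row above: per its docstring, the action callback receives the filename, its subfiles, the nesting level and the user data, so a minimal visitor that prints the tree could look like this (the `sitemap` variable is assumed to be an existing Sitemap instance).

def print_page(filename, subfiles, level, user_data):
    # Indent each filename by its depth in the sitemap hierarchy.
    print('  ' * level + filename)

# sitemap.walk(print_page)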
2,904
hotdoc/hotdoc
hotdoc/parsers/sitemap.py
SitemapParser.parse
def parse(self, filename): """ Parse a sitemap file. Args: filename: str, the path to the sitemap file. Returns: Sitemap: the generated sitemap. """ with io.open(filename, 'r', encoding='utf-8') as _: lines = _.readlines() all_source_files = set() source_map = {} lineno = 0 root = None index = None cur_level = -1 parent_queue = [] for line in lines: try: level, line = dedent(line) if line.startswith('#'): lineno += 1 continue elif line.startswith('\\#'): line = line[1:] except IndentError as exc: error('bad-indent', 'Invalid indentation', filename=filename, lineno=lineno, column=exc.column) if not line: lineno += 1 continue source_file = dequote(line) if not source_file: lineno += 1 continue if source_file in all_source_files: error('sitemap-duplicate', 'Filename listed twice', filename=filename, lineno=lineno, column=level * 8 + 1) all_source_files.add(source_file) source_map[source_file] = (lineno, level * 8 + 1) page = OrderedDict() if root is not None and level == 0: error('sitemap-error', 'Sitemaps only support one root', filename=filename, lineno=lineno, column=0) if root is None: root = page index = source_file else: lvl_diff = cur_level - level while lvl_diff >= 0: parent_queue.pop() lvl_diff -= 1 parent_queue[-1][source_file] = page parent_queue.append(page) cur_level = level lineno += 1 return Sitemap(root, filename, index, source_map)
python
def parse(self, filename): """ Parse a sitemap file. Args: filename: str, the path to the sitemap file. Returns: Sitemap: the generated sitemap. """ with io.open(filename, 'r', encoding='utf-8') as _: lines = _.readlines() all_source_files = set() source_map = {} lineno = 0 root = None index = None cur_level = -1 parent_queue = [] for line in lines: try: level, line = dedent(line) if line.startswith('#'): lineno += 1 continue elif line.startswith('\\#'): line = line[1:] except IndentError as exc: error('bad-indent', 'Invalid indentation', filename=filename, lineno=lineno, column=exc.column) if not line: lineno += 1 continue source_file = dequote(line) if not source_file: lineno += 1 continue if source_file in all_source_files: error('sitemap-duplicate', 'Filename listed twice', filename=filename, lineno=lineno, column=level * 8 + 1) all_source_files.add(source_file) source_map[source_file] = (lineno, level * 8 + 1) page = OrderedDict() if root is not None and level == 0: error('sitemap-error', 'Sitemaps only support one root', filename=filename, lineno=lineno, column=0) if root is None: root = page index = source_file else: lvl_diff = cur_level - level while lvl_diff >= 0: parent_queue.pop() lvl_diff -= 1 parent_queue[-1][source_file] = page parent_queue.append(page) cur_level = level lineno += 1 return Sitemap(root, filename, index, source_map)
[ "def", "parse", "(", "self", ",", "filename", ")", ":", "with", "io", ".", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "_", ":", "lines", "=", "_", ".", "readlines", "(", ")", "all_source_files", "=", "set", "(", ")", "source_map", "=", "{", "}", "lineno", "=", "0", "root", "=", "None", "index", "=", "None", "cur_level", "=", "-", "1", "parent_queue", "=", "[", "]", "for", "line", "in", "lines", ":", "try", ":", "level", ",", "line", "=", "dedent", "(", "line", ")", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "lineno", "+=", "1", "continue", "elif", "line", ".", "startswith", "(", "'\\\\#'", ")", ":", "line", "=", "line", "[", "1", ":", "]", "except", "IndentError", "as", "exc", ":", "error", "(", "'bad-indent'", ",", "'Invalid indentation'", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", ",", "column", "=", "exc", ".", "column", ")", "if", "not", "line", ":", "lineno", "+=", "1", "continue", "source_file", "=", "dequote", "(", "line", ")", "if", "not", "source_file", ":", "lineno", "+=", "1", "continue", "if", "source_file", "in", "all_source_files", ":", "error", "(", "'sitemap-duplicate'", ",", "'Filename listed twice'", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", ",", "column", "=", "level", "*", "8", "+", "1", ")", "all_source_files", ".", "add", "(", "source_file", ")", "source_map", "[", "source_file", "]", "=", "(", "lineno", ",", "level", "*", "8", "+", "1", ")", "page", "=", "OrderedDict", "(", ")", "if", "root", "is", "not", "None", "and", "level", "==", "0", ":", "error", "(", "'sitemap-error'", ",", "'Sitemaps only support one root'", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", ",", "column", "=", "0", ")", "if", "root", "is", "None", ":", "root", "=", "page", "index", "=", "source_file", "else", ":", "lvl_diff", "=", "cur_level", "-", "level", "while", "lvl_diff", ">=", "0", ":", "parent_queue", ".", "pop", "(", ")", "lvl_diff", "-=", "1", "parent_queue", "[", "-", "1", "]", "[", "source_file", "]", "=", "page", "parent_queue", ".", "append", "(", "page", ")", "cur_level", "=", "level", "lineno", "+=", "1", "return", "Sitemap", "(", "root", ",", "filename", ",", "index", ",", "source_map", ")" ]
Parse a sitemap file. Args: filename: str, the path to the sitemap file. Returns: Sitemap: the generated sitemap.
[ "Parse", "a", "sitemap", "file", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/sitemap.py#L144-L218
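A hedged sketch of the input SitemapParser.parse() accepts, based only on the parsing logic above: a single root file, children indented beneath it, '#' starting comment lines and '\#' escaping a literal '#'. The exact indentation width depends on the dedent() helper (not shown; the column arithmetic suggests 8-column units), and the filenames below are made up.

# Hypothetical sitemap file: one root page with two children.
sitemap_text = (
    "index.md\n"
    "        getting-started.md\n"
    "        reference.md\n"
)

# from hotdoc.parsers.sitemap import SitemapParser
# sitemap = SitemapParser().parse('sitemap.txt')  # a file containing sitemap_text
# sitemap.walk(print_page)                        # see the walk() sketch above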
2,905
hotdoc/hotdoc
hotdoc/parsers/gtk_doc.py
GtkDocParser.parse_comment
def parse_comment(self, comment, filename, lineno, endlineno, include_paths=None, stripped=False): """ Returns a Comment given a string """ if not stripped and not self.__validate_c_comment(comment.strip()): return None title_offset = 0 column_offset = 0 raw_comment = comment if not stripped: try: while comment[column_offset * -1 - 1] != '\n': column_offset += 1 except IndexError: column_offset = 0 comment, title_offset = self.__strip_comment(comment) title_and_params, description = self.__extract_titles_params_and_description(comment) try: block_name, parameters, annotations, is_section = \ self.__parse_title_and_parameters(filename, title_and_params) except HotdocSourceException as _: warn('gtk-doc-bad-syntax', message=_.message, filename=filename, lineno=lineno + title_offset) return None params_offset = 0 for param in parameters: param.filename = filename param.lineno = lineno param_offset = param.line_offset param.line_offset = title_offset + params_offset + 1 params_offset += param_offset param.col_offset = column_offset if not block_name: return None description_offset = 0 meta = {} tags = [] if description is not None: n_lines = len(comment.split('\n')) description_offset = (title_offset + n_lines - len(description.split('\n'))) meta['description'], tags = self.__parse_description_and_tags(description) actual_parameters = OrderedDict({}) for param in parameters: if is_section: cleaned_up_name = param.name.lower().replace('_', '-') if cleaned_up_name in ['symbols', 'private-symbols', 'auto-sort', 'sources']: meta.update(self.__parse_yaml_comment(param, filename)) if cleaned_up_name == 'sources': sources_paths = [os.path.abspath(os.path.join(os.path.dirname(filename), path)) for path in meta[cleaned_up_name]] meta[cleaned_up_name] = sources_paths else: meta[param.name] = param.description else: actual_parameters[param.name] = param annotations = {annotation.name: annotation for annotation in annotations} tags = {tag.name.lower(): tag for tag in tags} block = Comment(name=block_name, filename=filename, lineno=lineno, endlineno=endlineno, annotations=annotations, params=actual_parameters, tags=tags, raw_comment=raw_comment, meta=meta, toplevel=is_section) block.line_offset = description_offset block.col_offset = column_offset return block
python
def parse_comment(self, comment, filename, lineno, endlineno, include_paths=None, stripped=False): """ Returns a Comment given a string """ if not stripped and not self.__validate_c_comment(comment.strip()): return None title_offset = 0 column_offset = 0 raw_comment = comment if not stripped: try: while comment[column_offset * -1 - 1] != '\n': column_offset += 1 except IndexError: column_offset = 0 comment, title_offset = self.__strip_comment(comment) title_and_params, description = self.__extract_titles_params_and_description(comment) try: block_name, parameters, annotations, is_section = \ self.__parse_title_and_parameters(filename, title_and_params) except HotdocSourceException as _: warn('gtk-doc-bad-syntax', message=_.message, filename=filename, lineno=lineno + title_offset) return None params_offset = 0 for param in parameters: param.filename = filename param.lineno = lineno param_offset = param.line_offset param.line_offset = title_offset + params_offset + 1 params_offset += param_offset param.col_offset = column_offset if not block_name: return None description_offset = 0 meta = {} tags = [] if description is not None: n_lines = len(comment.split('\n')) description_offset = (title_offset + n_lines - len(description.split('\n'))) meta['description'], tags = self.__parse_description_and_tags(description) actual_parameters = OrderedDict({}) for param in parameters: if is_section: cleaned_up_name = param.name.lower().replace('_', '-') if cleaned_up_name in ['symbols', 'private-symbols', 'auto-sort', 'sources']: meta.update(self.__parse_yaml_comment(param, filename)) if cleaned_up_name == 'sources': sources_paths = [os.path.abspath(os.path.join(os.path.dirname(filename), path)) for path in meta[cleaned_up_name]] meta[cleaned_up_name] = sources_paths else: meta[param.name] = param.description else: actual_parameters[param.name] = param annotations = {annotation.name: annotation for annotation in annotations} tags = {tag.name.lower(): tag for tag in tags} block = Comment(name=block_name, filename=filename, lineno=lineno, endlineno=endlineno, annotations=annotations, params=actual_parameters, tags=tags, raw_comment=raw_comment, meta=meta, toplevel=is_section) block.line_offset = description_offset block.col_offset = column_offset return block
[ "def", "parse_comment", "(", "self", ",", "comment", ",", "filename", ",", "lineno", ",", "endlineno", ",", "include_paths", "=", "None", ",", "stripped", "=", "False", ")", ":", "if", "not", "stripped", "and", "not", "self", ".", "__validate_c_comment", "(", "comment", ".", "strip", "(", ")", ")", ":", "return", "None", "title_offset", "=", "0", "column_offset", "=", "0", "raw_comment", "=", "comment", "if", "not", "stripped", ":", "try", ":", "while", "comment", "[", "column_offset", "*", "-", "1", "-", "1", "]", "!=", "'\\n'", ":", "column_offset", "+=", "1", "except", "IndexError", ":", "column_offset", "=", "0", "comment", ",", "title_offset", "=", "self", ".", "__strip_comment", "(", "comment", ")", "title_and_params", ",", "description", "=", "self", ".", "__extract_titles_params_and_description", "(", "comment", ")", "try", ":", "block_name", ",", "parameters", ",", "annotations", ",", "is_section", "=", "self", ".", "__parse_title_and_parameters", "(", "filename", ",", "title_and_params", ")", "except", "HotdocSourceException", "as", "_", ":", "warn", "(", "'gtk-doc-bad-syntax'", ",", "message", "=", "_", ".", "message", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", "+", "title_offset", ")", "return", "None", "params_offset", "=", "0", "for", "param", "in", "parameters", ":", "param", ".", "filename", "=", "filename", "param", ".", "lineno", "=", "lineno", "param_offset", "=", "param", ".", "line_offset", "param", ".", "line_offset", "=", "title_offset", "+", "params_offset", "+", "1", "params_offset", "+=", "param_offset", "param", ".", "col_offset", "=", "column_offset", "if", "not", "block_name", ":", "return", "None", "description_offset", "=", "0", "meta", "=", "{", "}", "tags", "=", "[", "]", "if", "description", "is", "not", "None", ":", "n_lines", "=", "len", "(", "comment", ".", "split", "(", "'\\n'", ")", ")", "description_offset", "=", "(", "title_offset", "+", "n_lines", "-", "len", "(", "description", ".", "split", "(", "'\\n'", ")", ")", ")", "meta", "[", "'description'", "]", ",", "tags", "=", "self", ".", "__parse_description_and_tags", "(", "description", ")", "actual_parameters", "=", "OrderedDict", "(", "{", "}", ")", "for", "param", "in", "parameters", ":", "if", "is_section", ":", "cleaned_up_name", "=", "param", ".", "name", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "'-'", ")", "if", "cleaned_up_name", "in", "[", "'symbols'", ",", "'private-symbols'", ",", "'auto-sort'", ",", "'sources'", "]", ":", "meta", ".", "update", "(", "self", ".", "__parse_yaml_comment", "(", "param", ",", "filename", ")", ")", "if", "cleaned_up_name", "==", "'sources'", ":", "sources_paths", "=", "[", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "path", ")", ")", "for", "path", "in", "meta", "[", "cleaned_up_name", "]", "]", "meta", "[", "cleaned_up_name", "]", "=", "sources_paths", "else", ":", "meta", "[", "param", ".", "name", "]", "=", "param", ".", "description", "else", ":", "actual_parameters", "[", "param", ".", "name", "]", "=", "param", "annotations", "=", "{", "annotation", ".", "name", ":", "annotation", "for", "annotation", "in", "annotations", "}", "tags", "=", "{", "tag", ".", "name", ".", "lower", "(", ")", ":", "tag", "for", "tag", "in", "tags", "}", "block", "=", "Comment", "(", "name", "=", "block_name", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", ",", "endlineno", "=", "endlineno", ",", "annotations", "=", 
"annotations", ",", "params", "=", "actual_parameters", ",", "tags", "=", "tags", ",", "raw_comment", "=", "raw_comment", ",", "meta", "=", "meta", ",", "toplevel", "=", "is_section", ")", "block", ".", "line_offset", "=", "description_offset", "block", ".", "col_offset", "=", "column_offset", "return", "block" ]
Returns a Comment given a string
[ "Returns", "a", "Comment", "given", "a", "string" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/gtk_doc.py#L303-L382
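For context, a hedged illustration of the kind of GTK-Doc block parse_comment() consumes; the symbol and parameter names are hypothetical, only the general '/** ... */' layout with a title line, '@param:' lines and a description is assumed.

raw = (
    "/**\n"
    " * my_function:\n"
    " * @first: the first argument\n"
    " * @second: the second argument\n"
    " *\n"
    " * Does something with @first and @second.\n"
    " *\n"
    " * Returns: %TRUE on success\n"
    " */"
)

# Usage, assuming a configured GtkDocParser instance `parser`:
# comment = parser.parse_comment(raw, 'foo.c', lineno=10, endlineno=19)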
2,906
hotdoc/hotdoc
hotdoc/parsers/gtk_doc.py
GtkDocStringFormatter.comment_to_ast
def comment_to_ast(self, comment, link_resolver): """ Given a gtk-doc comment string, returns an opaque PyCapsule containing the document root. This is an optimization allowing to parse the docstring only once, and to render it multiple times with `ast_to_html`, links discovery and most of the link resolution being lazily done in that second phase. If you don't care about performance, you should simply use `translate`. Args: text: unicode, the docstring to parse. link_resolver: hotdoc.core.links.LinkResolver, an object which will be called to retrieve `hotdoc.core.links.Link` objects. Returns: capsule: A PyCapsule wrapping an opaque C pointer, which can be passed to `ast_to_html` afterwards. diagnostics: A list of diagnostics as output by the gtk-doc cmark extension """ assert comment is not None text = comment.description if (self.remove_xml_tags or comment.filename in self.gdbus_codegen_sources): text = re.sub('<.*?>', '', text) if self.escape_html: # pylint: disable=deprecated-method text = cgi.escape(text) ast, diagnostics = cmark.gtkdoc_to_ast(text, link_resolver) for diag in diagnostics: if (comment.filename and comment.filename not in self.gdbus_codegen_sources): column = diag.column + comment.col_offset if diag.lineno == 0: column += comment.initial_col_offset lines = text.split('\n') line = lines[diag.lineno] i = 0 while line[i] == ' ': i += 1 column += i - 1 if diag.lineno > 0 and any([c != ' ' for c in lines[diag.lineno - 1]]): column += 1 lineno = -1 if comment.lineno != -1: lineno = (comment.lineno - 1 + comment.line_offset + diag.lineno) warn( diag.code, message=diag.message, filename=comment.filename, lineno=lineno, column=column) return ast
python
def comment_to_ast(self, comment, link_resolver): """ Given a gtk-doc comment string, returns an opaque PyCapsule containing the document root. This is an optimization allowing to parse the docstring only once, and to render it multiple times with `ast_to_html`, links discovery and most of the link resolution being lazily done in that second phase. If you don't care about performance, you should simply use `translate`. Args: text: unicode, the docstring to parse. link_resolver: hotdoc.core.links.LinkResolver, an object which will be called to retrieve `hotdoc.core.links.Link` objects. Returns: capsule: A PyCapsule wrapping an opaque C pointer, which can be passed to `ast_to_html` afterwards. diagnostics: A list of diagnostics as output by the gtk-doc cmark extension """ assert comment is not None text = comment.description if (self.remove_xml_tags or comment.filename in self.gdbus_codegen_sources): text = re.sub('<.*?>', '', text) if self.escape_html: # pylint: disable=deprecated-method text = cgi.escape(text) ast, diagnostics = cmark.gtkdoc_to_ast(text, link_resolver) for diag in diagnostics: if (comment.filename and comment.filename not in self.gdbus_codegen_sources): column = diag.column + comment.col_offset if diag.lineno == 0: column += comment.initial_col_offset lines = text.split('\n') line = lines[diag.lineno] i = 0 while line[i] == ' ': i += 1 column += i - 1 if diag.lineno > 0 and any([c != ' ' for c in lines[diag.lineno - 1]]): column += 1 lineno = -1 if comment.lineno != -1: lineno = (comment.lineno - 1 + comment.line_offset + diag.lineno) warn( diag.code, message=diag.message, filename=comment.filename, lineno=lineno, column=column) return ast
[ "def", "comment_to_ast", "(", "self", ",", "comment", ",", "link_resolver", ")", ":", "assert", "comment", "is", "not", "None", "text", "=", "comment", ".", "description", "if", "(", "self", ".", "remove_xml_tags", "or", "comment", ".", "filename", "in", "self", ".", "gdbus_codegen_sources", ")", ":", "text", "=", "re", ".", "sub", "(", "'<.*?>'", ",", "''", ",", "text", ")", "if", "self", ".", "escape_html", ":", "# pylint: disable=deprecated-method", "text", "=", "cgi", ".", "escape", "(", "text", ")", "ast", ",", "diagnostics", "=", "cmark", ".", "gtkdoc_to_ast", "(", "text", ",", "link_resolver", ")", "for", "diag", "in", "diagnostics", ":", "if", "(", "comment", ".", "filename", "and", "comment", ".", "filename", "not", "in", "self", ".", "gdbus_codegen_sources", ")", ":", "column", "=", "diag", ".", "column", "+", "comment", ".", "col_offset", "if", "diag", ".", "lineno", "==", "0", ":", "column", "+=", "comment", ".", "initial_col_offset", "lines", "=", "text", ".", "split", "(", "'\\n'", ")", "line", "=", "lines", "[", "diag", ".", "lineno", "]", "i", "=", "0", "while", "line", "[", "i", "]", "==", "' '", ":", "i", "+=", "1", "column", "+=", "i", "-", "1", "if", "diag", ".", "lineno", ">", "0", "and", "any", "(", "[", "c", "!=", "' '", "for", "c", "in", "lines", "[", "diag", ".", "lineno", "-", "1", "]", "]", ")", ":", "column", "+=", "1", "lineno", "=", "-", "1", "if", "comment", ".", "lineno", "!=", "-", "1", ":", "lineno", "=", "(", "comment", ".", "lineno", "-", "1", "+", "comment", ".", "line_offset", "+", "diag", ".", "lineno", ")", "warn", "(", "diag", ".", "code", ",", "message", "=", "diag", ".", "message", ",", "filename", "=", "comment", ".", "filename", ",", "lineno", "=", "lineno", ",", "column", "=", "column", ")", "return", "ast" ]
Given a gtk-doc comment string, returns an opaque PyCapsule containing the document root. This is an optimization allowing to parse the docstring only once, and to render it multiple times with `ast_to_html`, links discovery and most of the link resolution being lazily done in that second phase. If you don't care about performance, you should simply use `translate`. Args: text: unicode, the docstring to parse. link_resolver: hotdoc.core.links.LinkResolver, an object which will be called to retrieve `hotdoc.core.links.Link` objects. Returns: capsule: A PyCapsule wrapping an opaque C pointer, which can be passed to `ast_to_html` afterwards. diagnostics: A list of diagnostics as output by the gtk-doc cmark extension
[ "Given", "a", "gtk", "-", "doc", "comment", "string", "returns", "an", "opaque", "PyCapsule", "containing", "the", "document", "root", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/gtk_doc.py#L396-L464
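A minimal sketch of the parse-once / render-many pattern this docstring describes, assuming `formatter` is a GtkDocStringFormatter, `comment` a hotdoc Comment and `link_resolver` a hotdoc.core.links.LinkResolver:

# ast = formatter.comment_to_ast(comment, link_resolver)
# html = formatter.ast_to_html(ast, link_resolver)   # cheap, can be called repeatedly
# (when performance does not matter, the docstring points to `translate` instead)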
2,907
hotdoc/hotdoc
hotdoc/parsers/gtk_doc.py
GtkDocStringFormatter.ast_to_html
def ast_to_html(self, ast, link_resolver): """ See the documentation of `to_ast` for more information. Args: ast: PyCapsule, a capsule as returned by `to_ast` link_resolver: hotdoc.core.links.LinkResolver, a link resolver instance. """ out, _ = cmark.ast_to_html(ast, link_resolver) return out
python
def ast_to_html(self, ast, link_resolver): """ See the documentation of `to_ast` for more information. Args: ast: PyCapsule, a capsule as returned by `to_ast` link_resolver: hotdoc.core.links.LinkResolver, a link resolver instance. """ out, _ = cmark.ast_to_html(ast, link_resolver) return out
[ "def", "ast_to_html", "(", "self", ",", "ast", ",", "link_resolver", ")", ":", "out", ",", "_", "=", "cmark", ".", "ast_to_html", "(", "ast", ",", "link_resolver", ")", "return", "out" ]
See the documentation of `to_ast` for more information. Args: ast: PyCapsule, a capsule as returned by `to_ast` link_resolver: hotdoc.core.links.LinkResolver, a link resolver instance.
[ "See", "the", "documentation", "of", "to_ast", "for", "more", "information", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/gtk_doc.py#L467-L478
2,908
hotdoc/hotdoc
hotdoc/parsers/gtk_doc.py
GtkDocStringFormatter.translate_comment
def translate_comment(self, comment, link_resolver): """ Given a gtk-doc comment string, returns the comment translated to the desired format. """ out = u'' self.translate_tags(comment, link_resolver) ast = self.comment_to_ast(comment, link_resolver) out += self.ast_to_html(ast, link_resolver) return out
python
def translate_comment(self, comment, link_resolver): """ Given a gtk-doc comment string, returns the comment translated to the desired format. """ out = u'' self.translate_tags(comment, link_resolver) ast = self.comment_to_ast(comment, link_resolver) out += self.ast_to_html(ast, link_resolver) return out
[ "def", "translate_comment", "(", "self", ",", "comment", ",", "link_resolver", ")", ":", "out", "=", "u''", "self", ".", "translate_tags", "(", "comment", ",", "link_resolver", ")", "ast", "=", "self", ".", "comment_to_ast", "(", "comment", ",", "link_resolver", ")", "out", "+=", "self", ".", "ast_to_html", "(", "ast", ",", "link_resolver", ")", "return", "out" ]
Given a gtk-doc comment string, returns the comment translated to the desired format.
[ "Given", "a", "gtk", "-", "doc", "comment", "string", "returns", "the", "comment", "translated", "to", "the", "desired", "format", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/gtk_doc.py#L480-L490
2,909
hotdoc/hotdoc
hotdoc/core/comment.py
comment_from_tag
def comment_from_tag(tag): """ Convenience function to create a full-fledged comment for a given tag, for example it is convenient to assign a Comment to a ReturnValueSymbol. """ if not tag: return None comment = Comment(name=tag.name, meta={'description': tag.description}, annotations=tag.annotations) return comment
python
def comment_from_tag(tag): """ Convenience function to create a full-fledged comment for a given tag, for example it is convenient to assign a Comment to a ReturnValueSymbol. """ if not tag: return None comment = Comment(name=tag.name, meta={'description': tag.description}, annotations=tag.annotations) return comment
[ "def", "comment_from_tag", "(", "tag", ")", ":", "if", "not", "tag", ":", "return", "None", "comment", "=", "Comment", "(", "name", "=", "tag", ".", "name", ",", "meta", "=", "{", "'description'", ":", "tag", ".", "description", "}", ",", "annotations", "=", "tag", ".", "annotations", ")", "return", "comment" ]
Convenience function to create a full-fledged comment for a given tag, for example it is convenient to assign a Comment to a ReturnValueSymbol.
[ "Convenience", "function", "to", "create", "a", "full", "-", "fledged", "comment", "for", "a", "given", "tag", "for", "example", "it", "is", "convenient", "to", "assign", "a", "Comment", "to", "a", "ReturnValueSymbol", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/comment.py#L159-L170
2,910
tcalmant/python-javaobj
javaobj/modifiedutf8.py
decoder
def decoder(data): """ This generator processes a sequence of bytes in Modified UTF-8 encoding and produces a sequence of unicode string characters. It takes bits from the byte until it matches one of the known encoding sequences. It uses ``DecodeMap`` to mask, compare and generate values. :param data: a string of bytes in Modified UTF-8 encoding. :return: a generator producing a string of unicode characters :raises UnicodeDecodeError: unrecognised byte in sequence encountered. """ def next_byte(_it, start, count): try: return next(_it)[1] except StopIteration: raise UnicodeDecodeError( NAME, data, start, start + count, "incomplete byte sequence" ) it = iter(enumerate(data)) for i, d in it: if d == 0x00: # 00000000 raise UnicodeDecodeError( NAME, data, i, i + 1, "embedded zero-byte not allowed" ) elif d & 0x80: # 1xxxxxxx if d & 0x40: # 11xxxxxx if d & 0x20: # 111xxxxx if d & 0x10: # 1111xxxx raise UnicodeDecodeError( NAME, data, i, i + 1, "invalid encoding character" ) elif d == 0xED: value = 0 for i1, dm in enumerate(DECODE_MAP[6]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 1110xxxx value = d & 0x0F for i1, dm in enumerate(DECODE_MAP[3]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 110xxxxx value = d & 0x1F for i1, dm in enumerate(DECODE_MAP[2]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 10xxxxxx raise UnicodeDecodeError( NAME, data, i, i + 1, "misplaced continuation character" ) else: # 0xxxxxxx value = d # noinspection PyCompatibility yield mutf8_unichr(value)
python
def decoder(data): """ This generator processes a sequence of bytes in Modified UTF-8 encoding and produces a sequence of unicode string characters. It takes bits from the byte until it matches one of the known encoding sequences. It uses ``DecodeMap`` to mask, compare and generate values. :param data: a string of bytes in Modified UTF-8 encoding. :return: a generator producing a string of unicode characters :raises UnicodeDecodeError: unrecognised byte in sequence encountered. """ def next_byte(_it, start, count): try: return next(_it)[1] except StopIteration: raise UnicodeDecodeError( NAME, data, start, start + count, "incomplete byte sequence" ) it = iter(enumerate(data)) for i, d in it: if d == 0x00: # 00000000 raise UnicodeDecodeError( NAME, data, i, i + 1, "embedded zero-byte not allowed" ) elif d & 0x80: # 1xxxxxxx if d & 0x40: # 11xxxxxx if d & 0x20: # 111xxxxx if d & 0x10: # 1111xxxx raise UnicodeDecodeError( NAME, data, i, i + 1, "invalid encoding character" ) elif d == 0xED: value = 0 for i1, dm in enumerate(DECODE_MAP[6]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 1110xxxx value = d & 0x0F for i1, dm in enumerate(DECODE_MAP[3]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 110xxxxx value = d & 0x1F for i1, dm in enumerate(DECODE_MAP[2]): d1 = next_byte(it, i, i1 + 1) value = dm.apply(d1, value, data, i, i1 + 1) else: # 10xxxxxx raise UnicodeDecodeError( NAME, data, i, i + 1, "misplaced continuation character" ) else: # 0xxxxxxx value = d # noinspection PyCompatibility yield mutf8_unichr(value)
[ "def", "decoder", "(", "data", ")", ":", "def", "next_byte", "(", "_it", ",", "start", ",", "count", ")", ":", "try", ":", "return", "next", "(", "_it", ")", "[", "1", "]", "except", "StopIteration", ":", "raise", "UnicodeDecodeError", "(", "NAME", ",", "data", ",", "start", ",", "start", "+", "count", ",", "\"incomplete byte sequence\"", ")", "it", "=", "iter", "(", "enumerate", "(", "data", ")", ")", "for", "i", ",", "d", "in", "it", ":", "if", "d", "==", "0x00", ":", "# 00000000", "raise", "UnicodeDecodeError", "(", "NAME", ",", "data", ",", "i", ",", "i", "+", "1", ",", "\"embedded zero-byte not allowed\"", ")", "elif", "d", "&", "0x80", ":", "# 1xxxxxxx", "if", "d", "&", "0x40", ":", "# 11xxxxxx", "if", "d", "&", "0x20", ":", "# 111xxxxx", "if", "d", "&", "0x10", ":", "# 1111xxxx", "raise", "UnicodeDecodeError", "(", "NAME", ",", "data", ",", "i", ",", "i", "+", "1", ",", "\"invalid encoding character\"", ")", "elif", "d", "==", "0xED", ":", "value", "=", "0", "for", "i1", ",", "dm", "in", "enumerate", "(", "DECODE_MAP", "[", "6", "]", ")", ":", "d1", "=", "next_byte", "(", "it", ",", "i", ",", "i1", "+", "1", ")", "value", "=", "dm", ".", "apply", "(", "d1", ",", "value", ",", "data", ",", "i", ",", "i1", "+", "1", ")", "else", ":", "# 1110xxxx", "value", "=", "d", "&", "0x0F", "for", "i1", ",", "dm", "in", "enumerate", "(", "DECODE_MAP", "[", "3", "]", ")", ":", "d1", "=", "next_byte", "(", "it", ",", "i", ",", "i1", "+", "1", ")", "value", "=", "dm", ".", "apply", "(", "d1", ",", "value", ",", "data", ",", "i", ",", "i1", "+", "1", ")", "else", ":", "# 110xxxxx", "value", "=", "d", "&", "0x1F", "for", "i1", ",", "dm", "in", "enumerate", "(", "DECODE_MAP", "[", "2", "]", ")", ":", "d1", "=", "next_byte", "(", "it", ",", "i", ",", "i1", "+", "1", ")", "value", "=", "dm", ".", "apply", "(", "d1", ",", "value", ",", "data", ",", "i", ",", "i1", "+", "1", ")", "else", ":", "# 10xxxxxx", "raise", "UnicodeDecodeError", "(", "NAME", ",", "data", ",", "i", ",", "i", "+", "1", ",", "\"misplaced continuation character\"", ")", "else", ":", "# 0xxxxxxx", "value", "=", "d", "# noinspection PyCompatibility", "yield", "mutf8_unichr", "(", "value", ")" ]
This generator processes a sequence of bytes in Modified UTF-8 encoding and produces a sequence of unicode string characters. It takes bits from the byte until it matches one of the known encoding sequences. It uses ``DecodeMap`` to mask, compare and generate values. :param data: a string of bytes in Modified UTF-8 encoding. :return: a generator producing a string of unicode characters :raises UnicodeDecodeError: unrecognised byte in sequence encountered.
[ "This", "generator", "processes", "a", "sequence", "of", "bytes", "in", "Modified", "UTF", "-", "8", "encoding", "and", "produces", "a", "sequence", "of", "unicode", "string", "characters", "." ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/modifiedutf8.py#L103-L160
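A small demonstration of the byte patterns the decoder accepts: plain ASCII passes through unchanged, and Modified UTF-8 encodes U+0000 as the two-byte sequence 0xC0 0x80 rather than a raw zero byte (which the generator rejects).

# from javaobj.modifiedutf8 import decoder
data = b"Hi\xc0\x80!"         # 'H', 'i', encoded NUL (0xC0 0x80), '!'
# u"".join(decoder(data))     # -> u'Hi\x00!'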
2,911
tcalmant/python-javaobj
javaobj/modifiedutf8.py
decode_modified_utf8
def decode_modified_utf8(data, errors="strict"): """ Decodes a sequence of bytes to a unicode text and length using Modified UTF-8. This function is designed to be used with Python ``codecs`` module. :param data: a string of bytes in Modified UTF-8 :param errors: handle decoding errors :return: unicode text and length :raises UnicodeDecodeError: sequence is invalid. """ value, length = u"", 0 it = iter(decoder(data)) while True: try: value += next(it) length += 1 except StopIteration: break except UnicodeDecodeError as e: if errors == "strict": raise e elif errors == "ignore": pass elif errors == "replace": value += u"\uFFFD" length += 1 return value, length
python
def decode_modified_utf8(data, errors="strict"): """ Decodes a sequence of bytes to a unicode text and length using Modified UTF-8. This function is designed to be used with Python ``codecs`` module. :param data: a string of bytes in Modified UTF-8 :param errors: handle decoding errors :return: unicode text and length :raises UnicodeDecodeError: sequence is invalid. """ value, length = u"", 0 it = iter(decoder(data)) while True: try: value += next(it) length += 1 except StopIteration: break except UnicodeDecodeError as e: if errors == "strict": raise e elif errors == "ignore": pass elif errors == "replace": value += u"\uFFFD" length += 1 return value, length
[ "def", "decode_modified_utf8", "(", "data", ",", "errors", "=", "\"strict\"", ")", ":", "value", ",", "length", "=", "u\"\"", ",", "0", "it", "=", "iter", "(", "decoder", "(", "data", ")", ")", "while", "True", ":", "try", ":", "value", "+=", "next", "(", "it", ")", "length", "+=", "1", "except", "StopIteration", ":", "break", "except", "UnicodeDecodeError", "as", "e", ":", "if", "errors", "==", "\"strict\"", ":", "raise", "e", "elif", "errors", "==", "\"ignore\"", ":", "pass", "elif", "errors", "==", "\"replace\"", ":", "value", "+=", "u\"\\uFFFD\"", "length", "+=", "1", "return", "value", ",", "length" ]
Decodes a sequence of bytes to a unicode text and length using Modified UTF-8. This function is designed to be used with Python ``codecs`` module. :param data: a string of bytes in Modified UTF-8 :param errors: handle decoding errors :return: unicode text and length :raises UnicodeDecodeError: sequence is invalid.
[ "Decodes", "a", "sequence", "of", "bytes", "to", "a", "unicode", "text", "and", "length", "using", "Modified", "UTF", "-", "8", ".", "This", "function", "is", "designed", "to", "be", "used", "with", "Python", "codecs", "module", "." ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/modifiedutf8.py#L163-L190
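A hedged sketch of hooking decode_modified_utf8() into the codecs machinery, as the docstring suggests; the codec name 'mutf-8' and the placeholder encoder are assumptions, not what javaobj actually registers.

import codecs
# from javaobj.modifiedutf8 import decode_modified_utf8

def _search(name):
    if name != 'mutf-8':
        return None
    return codecs.CodecInfo(
        name='mutf-8',
        encode=codecs.lookup('utf-8').encode,  # placeholder; real MUTF-8 encoding differs
        decode=decode_modified_utf8,           # returns (text, length) as codecs expects
    )

# codecs.register(_search)
# b"Hi\xc0\x80!".decode('mutf-8')  # -> u'Hi\x00!'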
2,912
tcalmant/python-javaobj
javaobj/modifiedutf8.py
DecodeMap.apply
def apply(self, byte, value, data, i, count): """ Apply mask, compare to expected value, shift and return result. Eventually, this could become a ``reduce`` function. :param byte: The byte to compare :param value: The currently accumulated value. :param data: The data buffer, (array of bytes). :param i: The position within the data buffer. :param count: The position of this comparison. :return: A new value with the bits merged in. :raises UnicodeDecodeError: if marked bits don't match. """ if byte & self.mask == self.value: value <<= self.bits value |= byte & self.mask2 else: raise UnicodeDecodeError( NAME, data, i, i + count, "invalid {}-byte sequence".format(self.count) ) return value
python
def apply(self, byte, value, data, i, count): """ Apply mask, compare to expected value, shift and return result. Eventually, this could become a ``reduce`` function. :param byte: The byte to compare :param value: The currently accumulated value. :param data: The data buffer, (array of bytes). :param i: The position within the data buffer. :param count: The position of this comparison. :return: A new value with the bits merged in. :raises UnicodeDecodeError: if marked bits don't match. """ if byte & self.mask == self.value: value <<= self.bits value |= byte & self.mask2 else: raise UnicodeDecodeError( NAME, data, i, i + count, "invalid {}-byte sequence".format(self.count) ) return value
[ "def", "apply", "(", "self", ",", "byte", ",", "value", ",", "data", ",", "i", ",", "count", ")", ":", "if", "byte", "&", "self", ".", "mask", "==", "self", ".", "value", ":", "value", "<<=", "self", ".", "bits", "value", "|=", "byte", "&", "self", ".", "mask2", "else", ":", "raise", "UnicodeDecodeError", "(", "NAME", ",", "data", ",", "i", ",", "i", "+", "count", ",", "\"invalid {}-byte sequence\"", ".", "format", "(", "self", ".", "count", ")", ")", "return", "value" ]
Apply mask, compare to expected value, shift and return result. Eventually, this could become a ``reduce`` function. :param byte: The byte to compare :param value: The currently accumulated value. :param data: The data buffer, (array of bytes). :param i: The position within the data buffer. :param count: The position of this comparison. :return: A new value with the bits merged in. :raises UnicodeDecodeError: if marked bits don't match.
[ "Apply", "mask", "compare", "to", "expected", "value", "shift", "and", "return", "result", ".", "Eventually", "this", "could", "become", "a", "reduce", "function", "." ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/modifiedutf8.py#L55-L75
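A worked example of the accumulation apply() performs, assuming the DecodeMap entry for a continuation byte uses mask 0xC0, expected value 0x80, mask2 0x3F and bits 6 (the standard UTF-8 framing, which Modified UTF-8 shares in this range). Decoding b'\xc3\xa9' (U+00E9):

# lead byte:     0xC3 & 0x1F                   -> 0x03
# continuation:  0xA9 & 0xC0 == 0x80           (mask/value check passes)
# accumulate:    (0x03 << 6) | (0xA9 & 0x3F)   -> 0xE9 == 0x00E9
assert (0x03 << 6) | (0xA9 & 0x3F) == 0xE9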
2,913
tcalmant/python-javaobj
javaobj/core.py
load
def load(file_object, *transformers, **kwargs): """ Deserializes Java primitive data and objects serialized using ObjectOutputStream from a file-like object. :param file_object: A file-like object :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object """ # Read keyword argument ignore_remaining_data = kwargs.get("ignore_remaining_data", False) marshaller = JavaObjectUnmarshaller( file_object, kwargs.get("use_numpy_arrays", False) ) # Add custom transformers first for transformer in transformers: marshaller.add_transformer(transformer) marshaller.add_transformer(DefaultObjectTransformer()) # Read the file object return marshaller.readObject(ignore_remaining_data=ignore_remaining_data)
python
def load(file_object, *transformers, **kwargs): """ Deserializes Java primitive data and objects serialized using ObjectOutputStream from a file-like object. :param file_object: A file-like object :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object """ # Read keyword argument ignore_remaining_data = kwargs.get("ignore_remaining_data", False) marshaller = JavaObjectUnmarshaller( file_object, kwargs.get("use_numpy_arrays", False) ) # Add custom transformers first for transformer in transformers: marshaller.add_transformer(transformer) marshaller.add_transformer(DefaultObjectTransformer()) # Read the file object return marshaller.readObject(ignore_remaining_data=ignore_remaining_data)
[ "def", "load", "(", "file_object", ",", "*", "transformers", ",", "*", "*", "kwargs", ")", ":", "# Read keyword argument", "ignore_remaining_data", "=", "kwargs", ".", "get", "(", "\"ignore_remaining_data\"", ",", "False", ")", "marshaller", "=", "JavaObjectUnmarshaller", "(", "file_object", ",", "kwargs", ".", "get", "(", "\"use_numpy_arrays\"", ",", "False", ")", ")", "# Add custom transformers first", "for", "transformer", "in", "transformers", ":", "marshaller", ".", "add_transformer", "(", "transformer", ")", "marshaller", ".", "add_transformer", "(", "DefaultObjectTransformer", "(", ")", ")", "# Read the file object", "return", "marshaller", ".", "readObject", "(", "ignore_remaining_data", "=", "ignore_remaining_data", ")" ]
Deserializes Java primitive data and objects serialized using ObjectOutputStream from a file-like object. :param file_object: A file-like object :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object
[ "Deserializes", "Java", "primitive", "data", "and", "objects", "serialized", "using", "ObjectOutputStream", "from", "a", "file", "-", "like", "object", "." ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L101-L125
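Typical usage of load(), as suggested by its docstring; the file name is hypothetical.

# from javaobj.core import load
# with open('object.ser', 'rb') as fd:
#     obj = load(fd, ignore_remaining_data=True)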
2,914
tcalmant/python-javaobj
javaobj/core.py
loads
def loads(string, *transformers, **kwargs): """ Deserializes Java objects and primitive data serialized using ObjectOutputStream from a string. :param string: A Java data string :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object """ # Read keyword argument ignore_remaining_data = kwargs.get("ignore_remaining_data", False) # Reuse the load method (avoid code duplication) return load( BytesIO(string), *transformers, ignore_remaining_data=ignore_remaining_data )
python
def loads(string, *transformers, **kwargs): """ Deserializes Java objects and primitive data serialized using ObjectOutputStream from a string. :param string: A Java data string :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object """ # Read keyword argument ignore_remaining_data = kwargs.get("ignore_remaining_data", False) # Reuse the load method (avoid code duplication) return load( BytesIO(string), *transformers, ignore_remaining_data=ignore_remaining_data )
[ "def", "loads", "(", "string", ",", "*", "transformers", ",", "*", "*", "kwargs", ")", ":", "# Read keyword argument", "ignore_remaining_data", "=", "kwargs", ".", "get", "(", "\"ignore_remaining_data\"", ",", "False", ")", "# Reuse the load method (avoid code duplication)", "return", "load", "(", "BytesIO", "(", "string", ")", ",", "*", "transformers", ",", "ignore_remaining_data", "=", "ignore_remaining_data", ")" ]
Deserializes Java objects and primitive data serialized using ObjectOutputStream from a string. :param string: A Java data string :param transformers: Custom transformers to use :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining :return: The deserialized object
[ "Deserializes", "Java", "objects", "and", "primitive", "data", "serialized", "using", "ObjectOutputStream", "from", "a", "string", "." ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L128-L145
2,915
tcalmant/python-javaobj
javaobj/core.py
read
def read(data, fmt_str): """ Reads input bytes and extract the given structure. Returns both the read elements and the remaining data :param data: Data as bytes :param fmt_str: Struct unpack format string :return: A tuple (results as tuple, remaining data) """ size = struct.calcsize(fmt_str) return struct.unpack(fmt_str, data[:size]), data[size:]
python
def read(data, fmt_str): """ Reads input bytes and extract the given structure. Returns both the read elements and the remaining data :param data: Data as bytes :param fmt_str: Struct unpack format string :return: A tuple (results as tuple, remaining data) """ size = struct.calcsize(fmt_str) return struct.unpack(fmt_str, data[:size]), data[size:]
[ "def", "read", "(", "data", ",", "fmt_str", ")", ":", "size", "=", "struct", ".", "calcsize", "(", "fmt_str", ")", "return", "struct", ".", "unpack", "(", "fmt_str", ",", "data", "[", ":", "size", "]", ")", ",", "data", "[", "size", ":", "]" ]
Reads input bytes and extract the given structure. Returns both the read elements and the remaining data :param data: Data as bytes :param fmt_str: Struct unpack format string :return: A tuple (results as tuple, remaining data)
[ "Reads", "input", "bytes", "and", "extract", "the", "given", "structure", ".", "Returns", "both", "the", "read", "elements", "and", "the", "remaining", "data" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1666-L1676
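A small usage sketch of the read() helper: pull a big-endian unsigned short and an unsigned int off the front of a byte string and keep the remainder.

# from javaobj.core import read
# values, remaining = read(b"\x00\x05\x00\x00\x00\x2aTAIL", ">HI")
# values == (5, 42), remaining == b"TAIL"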
2,916
tcalmant/python-javaobj
javaobj/core.py
OpCodeDebug.flags
def flags(flags): """ Returns the names of the class description flags found in the given integer :param flags: A class description flag entry :return: The flags names as a single string """ names = sorted( descr for key, descr in OpCodeDebug.STREAM_CONSTANT.items() if key & flags ) return ", ".join(names)
python
def flags(flags): """ Returns the names of the class description flags found in the given integer :param flags: A class description flag entry :return: The flags names as a single string """ names = sorted( descr for key, descr in OpCodeDebug.STREAM_CONSTANT.items() if key & flags ) return ", ".join(names)
[ "def", "flags", "(", "flags", ")", ":", "names", "=", "sorted", "(", "descr", "for", "key", ",", "descr", "in", "OpCodeDebug", ".", "STREAM_CONSTANT", ".", "items", "(", ")", "if", "key", "&", "flags", ")", "return", "\", \"", ".", "join", "(", "names", ")" ]
Returns the names of the class description flags found in the given integer :param flags: A class description flag entry :return: The flags names as a single string
[ "Returns", "the", "names", "of", "the", "class", "description", "flags", "found", "in", "the", "given", "integer" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L457-L468
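For illustration: the Java serialization protocol defines SC_WRITE_METHOD = 0x01 and SC_SERIALIZABLE = 0x02, so a flag byte of 0x03 would make flags() return the comma-joined names of both entries, whatever labels STREAM_CONSTANT (not shown here) uses for them.

# OpCodeDebug.flags(0x03)
# -> e.g. "SC_SERIALIZABLE, SC_WRITE_METHOD"  (exact names depend on STREAM_CONSTANT)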
2,917
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller._readStreamHeader
def _readStreamHeader(self): """ Reads the magic header of a Java serialization stream :raise IOError: Invalid magic header (not a Java stream) """ (magic, version) = self._readStruct(">HH") if magic != self.STREAM_MAGIC or version != self.STREAM_VERSION: raise IOError( "The stream is not java serialized object. " "Invalid stream header: {0:04X}{1:04X}".format(magic, version) )
python
def _readStreamHeader(self): """ Reads the magic header of a Java serialization stream :raise IOError: Invalid magic header (not a Java stream) """ (magic, version) = self._readStruct(">HH") if magic != self.STREAM_MAGIC or version != self.STREAM_VERSION: raise IOError( "The stream is not java serialized object. " "Invalid stream header: {0:04X}{1:04X}".format(magic, version) )
[ "def", "_readStreamHeader", "(", "self", ")", ":", "(", "magic", ",", "version", ")", "=", "self", ".", "_readStruct", "(", "\">HH\"", ")", "if", "magic", "!=", "self", ".", "STREAM_MAGIC", "or", "version", "!=", "self", ".", "STREAM_VERSION", ":", "raise", "IOError", "(", "\"The stream is not java serialized object. \"", "\"Invalid stream header: {0:04X}{1:04X}\"", ".", "format", "(", "magic", ",", "version", ")", ")" ]
Reads the magic header of a Java serialization stream :raise IOError: Invalid magic header (not a Java stream)
[ "Reads", "the", "magic", "header", "of", "a", "Java", "serialization", "stream" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L559-L570
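For reference, the expected values are the well-known Java serialization magic: every ObjectOutputStream begins with the big-endian shorts 0xACED (STREAM_MAGIC) and 0x0005 (STREAM_VERSION).

# self._readStruct(">HH") on b"\xac\xed\x00\x05" yields (0xACED, 0x0005);
# any other header raises the IOError above.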
2,918
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller._read_and_exec_opcode
def _read_and_exec_opcode(self, ident=0, expect=None): """ Reads the next opcode, and executes its handler :param ident: Log identation level :param expect: A list of expected opcodes :return: A tuple: (opcode, result of the handler) :raise IOError: Read opcode is not one of the expected ones :raise RuntimeError: Unknown opcode """ position = self.object_stream.tell() (opid,) = self._readStruct(">B") log_debug( "OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})".format( opid, OpCodeDebug.op_id(opid), position ), ident, ) if expect and opid not in expect: raise IOError( "Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})".format( opid, OpCodeDebug.op_id(opid), position ) ) try: handler = self.opmap[opid] except KeyError: raise RuntimeError( "Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})".format( opid, position ) ) else: return opid, handler(ident=ident)
python
def _read_and_exec_opcode(self, ident=0, expect=None): """ Reads the next opcode, and executes its handler :param ident: Log identation level :param expect: A list of expected opcodes :return: A tuple: (opcode, result of the handler) :raise IOError: Read opcode is not one of the expected ones :raise RuntimeError: Unknown opcode """ position = self.object_stream.tell() (opid,) = self._readStruct(">B") log_debug( "OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})".format( opid, OpCodeDebug.op_id(opid), position ), ident, ) if expect and opid not in expect: raise IOError( "Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})".format( opid, OpCodeDebug.op_id(opid), position ) ) try: handler = self.opmap[opid] except KeyError: raise RuntimeError( "Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})".format( opid, position ) ) else: return opid, handler(ident=ident)
[ "def", "_read_and_exec_opcode", "(", "self", ",", "ident", "=", "0", ",", "expect", "=", "None", ")", ":", "position", "=", "self", ".", "object_stream", ".", "tell", "(", ")", "(", "opid", ",", ")", "=", "self", ".", "_readStruct", "(", "\">B\"", ")", "log_debug", "(", "\"OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})\"", ".", "format", "(", "opid", ",", "OpCodeDebug", ".", "op_id", "(", "opid", ")", ",", "position", ")", ",", "ident", ",", ")", "if", "expect", "and", "opid", "not", "in", "expect", ":", "raise", "IOError", "(", "\"Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})\"", ".", "format", "(", "opid", ",", "OpCodeDebug", ".", "op_id", "(", "opid", ")", ",", "position", ")", ")", "try", ":", "handler", "=", "self", ".", "opmap", "[", "opid", "]", "except", "KeyError", ":", "raise", "RuntimeError", "(", "\"Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})\"", ".", "format", "(", "opid", ",", "position", ")", ")", "else", ":", "return", "opid", ",", "handler", "(", "ident", "=", "ident", ")" ]
Reads the next opcode, and executes its handler :param ident: Log identation level :param expect: A list of expected opcodes :return: A tuple: (opcode, result of the handler) :raise IOError: Read opcode is not one of the expected ones :raise RuntimeError: Unknown opcode
[ "Reads", "the", "next", "opcode", "and", "executes", "its", "handler" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L572-L607
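The handler lookup above is a plain dispatch table. An illustrative sketch of the shape of self.opmap, using only opcodes and handlers that appear in the surrounding rows (the real table maps many more TC_* opcodes):

# self.opmap = {
#     self.TC_STRING: self.do_string,
#     self.TC_ARRAY: self.do_array,
#     self.TC_REFERENCE: self.do_reference,
#     # ... one entry per TC_* opcode ...
# }
# Each handler is then invoked as handler(ident=ident).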
2,919
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller.do_string
def do_string(self, parent=None, ident=0): """ Handles a TC_STRING opcode :param parent: :param ident: Log indentation level :return: A string """ log_debug("[string]", ident) ba = JavaString(self._readString()) self._add_reference(ba, ident) return ba
python
def do_string(self, parent=None, ident=0): """ Handles a TC_STRING opcode :param parent: :param ident: Log indentation level :return: A string """ log_debug("[string]", ident) ba = JavaString(self._readString()) self._add_reference(ba, ident) return ba
[ "def", "do_string", "(", "self", ",", "parent", "=", "None", ",", "ident", "=", "0", ")", ":", "log_debug", "(", "\"[string]\"", ",", "ident", ")", "ba", "=", "JavaString", "(", "self", ".", "_readString", "(", ")", ")", "self", ".", "_add_reference", "(", "ba", ",", "ident", ")", "return", "ba" ]
Handles a TC_STRING opcode :param parent: :param ident: Log indentation level :return: A string
[ "Handles", "a", "TC_STRING", "opcode" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L941-L952
2,920
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller.do_array
def do_array(self, parent=None, ident=0): """ Handles a TC_ARRAY opcode :param parent: :param ident: Log indentation level :return: A list of deserialized objects """ # TC_ARRAY classDesc newHandle (int)<size> values[size] log_debug("[array]", ident) _, classdesc = self._read_and_exec_opcode( ident=ident + 1, expect=( self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE, ), ) array = JavaArray(classdesc) self._add_reference(array, ident) (size,) = self._readStruct(">i") log_debug("size: {0}".format(size), ident) type_char = classdesc.name[0] assert type_char == self.TYPE_ARRAY type_char = classdesc.name[1] if type_char == self.TYPE_OBJECT or type_char == self.TYPE_ARRAY: for _ in range(size): _, res = self._read_and_exec_opcode(ident=ident + 1) log_debug("Object value: {0}".format(res), ident) array.append(res) elif type_char == self.TYPE_BYTE: array = JavaByteArray(self.object_stream.read(size), classdesc) elif self.use_numpy_arrays: import numpy array = numpy.fromfile( self.object_stream, dtype=JavaObjectConstants.NUMPY_TYPE_MAP[type_char], count=size, ) else: for _ in range(size): res = self._read_value(type_char, ident) log_debug("Native value: {0}".format(repr(res)), ident) array.append(res) return array
python
def do_array(self, parent=None, ident=0): """ Handles a TC_ARRAY opcode :param parent: :param ident: Log indentation level :return: A list of deserialized objects """ # TC_ARRAY classDesc newHandle (int)<size> values[size] log_debug("[array]", ident) _, classdesc = self._read_and_exec_opcode( ident=ident + 1, expect=( self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE, ), ) array = JavaArray(classdesc) self._add_reference(array, ident) (size,) = self._readStruct(">i") log_debug("size: {0}".format(size), ident) type_char = classdesc.name[0] assert type_char == self.TYPE_ARRAY type_char = classdesc.name[1] if type_char == self.TYPE_OBJECT or type_char == self.TYPE_ARRAY: for _ in range(size): _, res = self._read_and_exec_opcode(ident=ident + 1) log_debug("Object value: {0}".format(res), ident) array.append(res) elif type_char == self.TYPE_BYTE: array = JavaByteArray(self.object_stream.read(size), classdesc) elif self.use_numpy_arrays: import numpy array = numpy.fromfile( self.object_stream, dtype=JavaObjectConstants.NUMPY_TYPE_MAP[type_char], count=size, ) else: for _ in range(size): res = self._read_value(type_char, ident) log_debug("Native value: {0}".format(repr(res)), ident) array.append(res) return array
[ "def", "do_array", "(", "self", ",", "parent", "=", "None", ",", "ident", "=", "0", ")", ":", "# TC_ARRAY classDesc newHandle (int)<size> values[size]", "log_debug", "(", "\"[array]\"", ",", "ident", ")", "_", ",", "classdesc", "=", "self", ".", "_read_and_exec_opcode", "(", "ident", "=", "ident", "+", "1", ",", "expect", "=", "(", "self", ".", "TC_CLASSDESC", ",", "self", ".", "TC_PROXYCLASSDESC", ",", "self", ".", "TC_NULL", ",", "self", ".", "TC_REFERENCE", ",", ")", ",", ")", "array", "=", "JavaArray", "(", "classdesc", ")", "self", ".", "_add_reference", "(", "array", ",", "ident", ")", "(", "size", ",", ")", "=", "self", ".", "_readStruct", "(", "\">i\"", ")", "log_debug", "(", "\"size: {0}\"", ".", "format", "(", "size", ")", ",", "ident", ")", "type_char", "=", "classdesc", ".", "name", "[", "0", "]", "assert", "type_char", "==", "self", ".", "TYPE_ARRAY", "type_char", "=", "classdesc", ".", "name", "[", "1", "]", "if", "type_char", "==", "self", ".", "TYPE_OBJECT", "or", "type_char", "==", "self", ".", "TYPE_ARRAY", ":", "for", "_", "in", "range", "(", "size", ")", ":", "_", ",", "res", "=", "self", ".", "_read_and_exec_opcode", "(", "ident", "=", "ident", "+", "1", ")", "log_debug", "(", "\"Object value: {0}\"", ".", "format", "(", "res", ")", ",", "ident", ")", "array", ".", "append", "(", "res", ")", "elif", "type_char", "==", "self", ".", "TYPE_BYTE", ":", "array", "=", "JavaByteArray", "(", "self", ".", "object_stream", ".", "read", "(", "size", ")", ",", "classdesc", ")", "elif", "self", ".", "use_numpy_arrays", ":", "import", "numpy", "array", "=", "numpy", ".", "fromfile", "(", "self", ".", "object_stream", ",", "dtype", "=", "JavaObjectConstants", ".", "NUMPY_TYPE_MAP", "[", "type_char", "]", ",", "count", "=", "size", ",", ")", "else", ":", "for", "_", "in", "range", "(", "size", ")", ":", "res", "=", "self", ".", "_read_value", "(", "type_char", ",", "ident", ")", "log_debug", "(", "\"Native value: {0}\"", ".", "format", "(", "repr", "(", "res", ")", ")", ",", "ident", ")", "array", ".", "append", "(", "res", ")", "return", "array" ]
Handles a TC_ARRAY opcode :param parent: :param ident: Log indentation level :return: A list of deserialized objects
[ "Handles", "a", "TC_ARRAY", "opcode" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L967-L1019
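For primitive element types the loop above pulls one fixed-size value per iteration after the 4-byte count. A minimal stand-alone sketch of that body layout, assuming Java ints (big-endian ">i", matching the integer branch of _write_value later in this file):

import struct
from io import BytesIO

# (int)<size> followed by <size> big-endian int values
body = struct.pack(">i", 3) + struct.pack(">iii", 10, 20, 30)
stream = BytesIO(body)

(count,) = struct.unpack(">i", stream.read(4))
values = [struct.unpack(">i", stream.read(4))[0] for _ in range(count)]
assert values == [10, 20, 30]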
2,921
2921
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller.do_reference
def do_reference(self, parent=None, ident=0): """ Handles a TC_REFERENCE opcode :param parent: :param ident: Log indentation level :return: The referenced object """ (handle,) = self._readStruct(">L") log_debug("## Reference handle: 0x{0:X}".format(handle), ident) ref = self.references[handle - self.BASE_REFERENCE_IDX] log_debug("###-> Type: {0} - Value: {1}".format(type(ref), ref), ident) return ref
python
def do_reference(self, parent=None, ident=0): """ Handles a TC_REFERENCE opcode :param parent: :param ident: Log indentation level :return: The referenced object """ (handle,) = self._readStruct(">L") log_debug("## Reference handle: 0x{0:X}".format(handle), ident) ref = self.references[handle - self.BASE_REFERENCE_IDX] log_debug("###-> Type: {0} - Value: {1}".format(type(ref), ref), ident) return ref
[ "def", "do_reference", "(", "self", ",", "parent", "=", "None", ",", "ident", "=", "0", ")", ":", "(", "handle", ",", ")", "=", "self", ".", "_readStruct", "(", "\">L\"", ")", "log_debug", "(", "\"## Reference handle: 0x{0:X}\"", ".", "format", "(", "handle", ")", ",", "ident", ")", "ref", "=", "self", ".", "references", "[", "handle", "-", "self", ".", "BASE_REFERENCE_IDX", "]", "log_debug", "(", "\"###-> Type: {0} - Value: {1}\"", ".", "format", "(", "type", "(", "ref", ")", ",", "ref", ")", ",", "ident", ")", "return", "ref" ]
Handles a TC_REFERENCE opcode :param parent: :param ident: Log indentation level :return: The referenced object
[ "Handles", "a", "TC_REFERENCE", "opcode" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1021-L1033
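The handle arithmetic here pairs with _add_reference() below: each new object implicitly receives handle BASE_REFERENCE_IDX + position, and a TC_REFERENCE resolves by subtracting the base again. A toy sketch of that bookkeeping, assuming the base is 0x7E0000 (the value of java.io.ObjectStreamConstants.baseWireHandle; the library's actual constant is not shown in this record):

# assumed base, per the Java serialization spec
BASE_REFERENCE_IDX = 0x7E0000
references = []

def add_reference(obj):
    references.append(obj)
    return BASE_REFERENCE_IDX + len(references) - 1  # the handle is never written explicitly

def resolve(handle):
    return references[handle - BASE_REFERENCE_IDX]

first = add_reference("a string")
second = add_reference("another")
assert resolve(first) == "a string"
assert resolve(second) == "another"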
2,922
2922
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller.do_enum
def do_enum(self, parent=None, ident=0): """ Handles a TC_ENUM opcode :param parent: :param ident: Log indentation level :return: A JavaEnum object """ # TC_ENUM classDesc newHandle enumConstantName enum = JavaEnum() _, classdesc = self._read_and_exec_opcode( ident=ident + 1, expect=( self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE, ), ) enum.classdesc = classdesc self._add_reference(enum, ident) _, enumConstantName = self._read_and_exec_opcode( ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE) ) enum.constant = enumConstantName return enum
python
def do_enum(self, parent=None, ident=0): """ Handles a TC_ENUM opcode :param parent: :param ident: Log indentation level :return: A JavaEnum object """ # TC_ENUM classDesc newHandle enumConstantName enum = JavaEnum() _, classdesc = self._read_and_exec_opcode( ident=ident + 1, expect=( self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE, ), ) enum.classdesc = classdesc self._add_reference(enum, ident) _, enumConstantName = self._read_and_exec_opcode( ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE) ) enum.constant = enumConstantName return enum
[ "def", "do_enum", "(", "self", ",", "parent", "=", "None", ",", "ident", "=", "0", ")", ":", "# TC_ENUM classDesc newHandle enumConstantName", "enum", "=", "JavaEnum", "(", ")", "_", ",", "classdesc", "=", "self", ".", "_read_and_exec_opcode", "(", "ident", "=", "ident", "+", "1", ",", "expect", "=", "(", "self", ".", "TC_CLASSDESC", ",", "self", ".", "TC_PROXYCLASSDESC", ",", "self", ".", "TC_NULL", ",", "self", ".", "TC_REFERENCE", ",", ")", ",", ")", "enum", ".", "classdesc", "=", "classdesc", "self", ".", "_add_reference", "(", "enum", ",", "ident", ")", "_", ",", "enumConstantName", "=", "self", ".", "_read_and_exec_opcode", "(", "ident", "=", "ident", "+", "1", ",", "expect", "=", "(", "self", ".", "TC_STRING", ",", "self", ".", "TC_REFERENCE", ")", ")", "enum", ".", "constant", "=", "enumConstantName", "return", "enum" ]
Handles a TC_ENUM opcode :param parent: :param ident: Log indentation level :return: A JavaEnum object
[ "Handles", "a", "TC_ENUM", "opcode" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1046-L1071
2,923
2923
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller._create_hexdump
def _create_hexdump(src, start_offset=0, length=16): """ Prepares an hexadecimal dump string :param src: A string containing binary data :param start_offset: The start offset of the source :param length: Length of a dump line :return: A dump string """ FILTER = "".join((len(repr(chr(x))) == 3) and chr(x) or "." for x in range(256)) pattern = "{{0:04X}} {{1:<{0}}} {{2}}\n".format(length * 3) # Convert raw data to str (Python 3 compatibility) src = to_str(src, "latin-1") result = [] for i in range(0, len(src), length): s = src[i : i + length] hexa = " ".join("{0:02X}".format(ord(x)) for x in s) printable = s.translate(FILTER) result.append(pattern.format(i + start_offset, hexa, printable)) return "".join(result)
python
def _create_hexdump(src, start_offset=0, length=16): """ Prepares an hexadecimal dump string :param src: A string containing binary data :param start_offset: The start offset of the source :param length: Length of a dump line :return: A dump string """ FILTER = "".join((len(repr(chr(x))) == 3) and chr(x) or "." for x in range(256)) pattern = "{{0:04X}} {{1:<{0}}} {{2}}\n".format(length * 3) # Convert raw data to str (Python 3 compatibility) src = to_str(src, "latin-1") result = [] for i in range(0, len(src), length): s = src[i : i + length] hexa = " ".join("{0:02X}".format(ord(x)) for x in s) printable = s.translate(FILTER) result.append(pattern.format(i + start_offset, hexa, printable)) return "".join(result)
[ "def", "_create_hexdump", "(", "src", ",", "start_offset", "=", "0", ",", "length", "=", "16", ")", ":", "FILTER", "=", "\"\"", ".", "join", "(", "(", "len", "(", "repr", "(", "chr", "(", "x", ")", ")", ")", "==", "3", ")", "and", "chr", "(", "x", ")", "or", "\".\"", "for", "x", "in", "range", "(", "256", ")", ")", "pattern", "=", "\"{{0:04X}} {{1:<{0}}} {{2}}\\n\"", ".", "format", "(", "length", "*", "3", ")", "# Convert raw data to str (Python 3 compatibility)", "src", "=", "to_str", "(", "src", ",", "\"latin-1\"", ")", "result", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "src", ")", ",", "length", ")", ":", "s", "=", "src", "[", "i", ":", "i", "+", "length", "]", "hexa", "=", "\" \"", ".", "join", "(", "\"{0:02X}\"", ".", "format", "(", "ord", "(", "x", ")", ")", "for", "x", "in", "s", ")", "printable", "=", "s", ".", "translate", "(", "FILTER", ")", "result", ".", "append", "(", "pattern", ".", "format", "(", "i", "+", "start_offset", ",", "hexa", ",", "printable", ")", ")", "return", "\"\"", ".", "join", "(", "result", ")" ]
Prepares an hexadecimal dump string :param src: A string containing binary data :param start_offset: The start offset of the source :param length: Length of a dump line :return: A dump string
[ "Prepares", "an", "hexadecimal", "dump", "string" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1074-L1096
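A condensed, bytes-based sketch of the same hexdump idea (illustrative names only, not the library's implementation): offset column, space-padded hex column, then a printable-or-dot column.

def hexdump(data: bytes, start_offset: int = 0, length: int = 16) -> str:
    lines = []
    for i in range(0, len(data), length):
        chunk = data[i:i + length]
        hexa = " ".join("{0:02X}".format(b) for b in chunk)
        printable = "".join(chr(b) if 32 <= b < 127 else "." for b in chunk)
        lines.append("{0:04X}   {1:<{2}}  {3}".format(i + start_offset, hexa, length * 3, printable))
    return "\n".join(lines)

# e.g. dumping a Java stream header plus a short string record
print(hexdump(b"\xac\xed\x00\x05t\x00\x02hi"))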
2,924
2924
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller._convert_char_to_type
def _convert_char_to_type(self, type_char): """ Ensures a read character is a typecode. :param type_char: Read typecode :return: The typecode as a string (using chr) :raise RuntimeError: Unknown typecode """ typecode = type_char if type(type_char) is int: typecode = chr(type_char) if typecode in self.TYPECODES_LIST: return typecode else: raise RuntimeError( "Typecode {0} ({1}) isn't supported.".format(type_char, typecode) )
python
def _convert_char_to_type(self, type_char): """ Ensures a read character is a typecode. :param type_char: Read typecode :return: The typecode as a string (using chr) :raise RuntimeError: Unknown typecode """ typecode = type_char if type(type_char) is int: typecode = chr(type_char) if typecode in self.TYPECODES_LIST: return typecode else: raise RuntimeError( "Typecode {0} ({1}) isn't supported.".format(type_char, typecode) )
[ "def", "_convert_char_to_type", "(", "self", ",", "type_char", ")", ":", "typecode", "=", "type_char", "if", "type", "(", "type_char", ")", "is", "int", ":", "typecode", "=", "chr", "(", "type_char", ")", "if", "typecode", "in", "self", ".", "TYPECODES_LIST", ":", "return", "typecode", "else", ":", "raise", "RuntimeError", "(", "\"Typecode {0} ({1}) isn't supported.\"", ".", "format", "(", "type_char", ",", "typecode", ")", ")" ]
Ensures a read character is a typecode. :param type_char: Read typecode :return: The typecode as a string (using chr) :raise RuntimeError: Unknown typecode
[ "Ensures", "a", "read", "character", "is", "a", "typecode", "." ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1140-L1157
2,925
2925
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller._add_reference
def _add_reference(self, obj, ident=0): """ Adds a read reference to the marshaler storage :param obj: Reference to add :param ident: Log indentation level """ log_debug( "## New reference handle 0x{0:X}: {1} -> {2}".format( len(self.references) + self.BASE_REFERENCE_IDX, type(obj).__name__, repr(obj), ), ident, ) self.references.append(obj)
python
def _add_reference(self, obj, ident=0): """ Adds a read reference to the marshaler storage :param obj: Reference to add :param ident: Log indentation level """ log_debug( "## New reference handle 0x{0:X}: {1} -> {2}".format( len(self.references) + self.BASE_REFERENCE_IDX, type(obj).__name__, repr(obj), ), ident, ) self.references.append(obj)
[ "def", "_add_reference", "(", "self", ",", "obj", ",", "ident", "=", "0", ")", ":", "log_debug", "(", "\"## New reference handle 0x{0:X}: {1} -> {2}\"", ".", "format", "(", "len", "(", "self", ".", "references", ")", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "type", "(", "obj", ")", ".", "__name__", ",", "repr", "(", "obj", ")", ",", ")", ",", "ident", ",", ")", "self", ".", "references", ".", "append", "(", "obj", ")" ]
Adds a read reference to the marshaler storage :param obj: Reference to add :param ident: Log indentation level
[ "Adds", "a", "read", "reference", "to", "the", "marshaler", "storage" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1159-L1174
2,926
2926
tcalmant/python-javaobj
javaobj/core.py
JavaObjectUnmarshaller._oops_dump_state
def _oops_dump_state(self, ignore_remaining_data=False): """ Log a deserialization error :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining """ log_error("==Oops state dump" + "=" * (30 - 17)) log_error("References: {0}".format(self.references)) log_error("Stream seeking back at -16 byte (2nd line is an actual position!):") # Do not use a keyword argument self.object_stream.seek(-16, os.SEEK_CUR) position = self.object_stream.tell() the_rest = self.object_stream.read() if not ignore_remaining_data and len(the_rest): log_error( "Warning!!!!: Stream still has {0} bytes left:\n{1}".format( len(the_rest), self._create_hexdump(the_rest, position) ) ) log_error("=" * 30)
python
def _oops_dump_state(self, ignore_remaining_data=False): """ Log a deserialization error :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining """ log_error("==Oops state dump" + "=" * (30 - 17)) log_error("References: {0}".format(self.references)) log_error("Stream seeking back at -16 byte (2nd line is an actual position!):") # Do not use a keyword argument self.object_stream.seek(-16, os.SEEK_CUR) position = self.object_stream.tell() the_rest = self.object_stream.read() if not ignore_remaining_data and len(the_rest): log_error( "Warning!!!!: Stream still has {0} bytes left:\n{1}".format( len(the_rest), self._create_hexdump(the_rest, position) ) ) log_error("=" * 30)
[ "def", "_oops_dump_state", "(", "self", ",", "ignore_remaining_data", "=", "False", ")", ":", "log_error", "(", "\"==Oops state dump\"", "+", "\"=\"", "*", "(", "30", "-", "17", ")", ")", "log_error", "(", "\"References: {0}\"", ".", "format", "(", "self", ".", "references", ")", ")", "log_error", "(", "\"Stream seeking back at -16 byte (2nd line is an actual position!):\"", ")", "# Do not use a keyword argument", "self", ".", "object_stream", ".", "seek", "(", "-", "16", ",", "os", ".", "SEEK_CUR", ")", "position", "=", "self", ".", "object_stream", ".", "tell", "(", ")", "the_rest", "=", "self", ".", "object_stream", ".", "read", "(", ")", "if", "not", "ignore_remaining_data", "and", "len", "(", "the_rest", ")", ":", "log_error", "(", "\"Warning!!!!: Stream still has {0} bytes left:\\n{1}\"", ".", "format", "(", "len", "(", "the_rest", ")", ",", "self", ".", "_create_hexdump", "(", "the_rest", ",", "position", ")", ")", ")", "log_error", "(", "\"=\"", "*", "30", ")" ]
Log a deserialization error :param ignore_remaining_data: If True, don't log an error when unused trailing bytes are remaining
[ "Log", "a", "deserialization", "error" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1176-L1199
2,927
2927
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.dump
def dump(self, obj): """ Dumps the given object in the Java serialization format """ self.references = [] self.object_obj = obj self.object_stream = BytesIO() self._writeStreamHeader() self.writeObject(obj) return self.object_stream.getvalue()
python
def dump(self, obj): """ Dumps the given object in the Java serialization format """ self.references = [] self.object_obj = obj self.object_stream = BytesIO() self._writeStreamHeader() self.writeObject(obj) return self.object_stream.getvalue()
[ "def", "dump", "(", "self", ",", "obj", ")", ":", "self", ".", "references", "=", "[", "]", "self", ".", "object_obj", "=", "obj", "self", ".", "object_stream", "=", "BytesIO", "(", ")", "self", ".", "_writeStreamHeader", "(", ")", "self", ".", "writeObject", "(", "obj", ")", "return", "self", ".", "object_stream", ".", "getvalue", "(", ")" ]
Dumps the given object in the Java serialization format
[ "Dumps", "the", "given", "object", "in", "the", "Java", "serialization", "format" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1229-L1238
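This is the marshaller's entry point. Assuming the package exposes a module-level dumps() wrapper around this class (not shown in this record), typical usage and the fixed stream header written by _writeStreamHeader() look like:

import javaobj  # assumption: javaobj.dumps() is re-exported and backed by JavaObjectMarshaller.dump()

payload = javaobj.dumps("hello")           # a plain str goes through write_blockdata()
assert payload[:4] == b"\xac\xed\x00\x05"  # STREAM_MAGIC (0xACED) + STREAM_VERSION (5)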
2,928
2928
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.writeObject
def writeObject(self, obj): """ Appends an object to the serialization stream :param obj: A string or a deserialized Java object :raise RuntimeError: Unsupported type """ log_debug("Writing object of type {0}".format(type(obj).__name__)) if isinstance(obj, JavaArray): # Deserialized Java array self.write_array(obj) elif isinstance(obj, JavaEnum): # Deserialized Java Enum self.write_enum(obj) elif isinstance(obj, JavaObject): # Deserialized Java object self.write_object(obj) elif isinstance(obj, JavaString): # Deserialized String self.write_string(obj) elif isinstance(obj, JavaClass): # Java class self.write_class(obj) elif obj is None: # Null self.write_null() elif type(obj) is str: # String value self.write_blockdata(obj) else: # Unhandled type raise RuntimeError( "Object serialization of type {0} is not " "supported.".format(type(obj)) )
python
def writeObject(self, obj): """ Appends an object to the serialization stream :param obj: A string or a deserialized Java object :raise RuntimeError: Unsupported type """ log_debug("Writing object of type {0}".format(type(obj).__name__)) if isinstance(obj, JavaArray): # Deserialized Java array self.write_array(obj) elif isinstance(obj, JavaEnum): # Deserialized Java Enum self.write_enum(obj) elif isinstance(obj, JavaObject): # Deserialized Java object self.write_object(obj) elif isinstance(obj, JavaString): # Deserialized String self.write_string(obj) elif isinstance(obj, JavaClass): # Java class self.write_class(obj) elif obj is None: # Null self.write_null() elif type(obj) is str: # String value self.write_blockdata(obj) else: # Unhandled type raise RuntimeError( "Object serialization of type {0} is not " "supported.".format(type(obj)) )
[ "def", "writeObject", "(", "self", ",", "obj", ")", ":", "log_debug", "(", "\"Writing object of type {0}\"", ".", "format", "(", "type", "(", "obj", ")", ".", "__name__", ")", ")", "if", "isinstance", "(", "obj", ",", "JavaArray", ")", ":", "# Deserialized Java array", "self", ".", "write_array", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "JavaEnum", ")", ":", "# Deserialized Java Enum", "self", ".", "write_enum", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "JavaObject", ")", ":", "# Deserialized Java object", "self", ".", "write_object", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "JavaString", ")", ":", "# Deserialized String", "self", ".", "write_string", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "JavaClass", ")", ":", "# Java class", "self", ".", "write_class", "(", "obj", ")", "elif", "obj", "is", "None", ":", "# Null", "self", ".", "write_null", "(", ")", "elif", "type", "(", "obj", ")", "is", "str", ":", "# String value", "self", ".", "write_blockdata", "(", "obj", ")", "else", ":", "# Unhandled type", "raise", "RuntimeError", "(", "\"Object serialization of type {0} is not \"", "\"supported.\"", ".", "format", "(", "type", "(", "obj", ")", ")", ")" ]
Appends an object to the serialization stream :param obj: A string or a deserialized Java object :raise RuntimeError: Unsupported type
[ "Appends", "an", "object", "to", "the", "serialization", "stream" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1246-L1280
2,929
2929
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller._writeString
def _writeString(self, obj, use_reference=True): """ Appends a string to the serialization stream :param obj: String to serialize :param use_reference: If True, allow writing a reference """ # TODO: Convert to "modified UTF-8" # http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8 string = to_bytes(obj, "utf-8") if use_reference and isinstance(obj, JavaString): try: idx = self.references.index(obj) except ValueError: # First appearance of the string self.references.append(obj) logging.debug( "*** Adding ref 0x%X for string: %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj, ) self._writeStruct(">H", 2, (len(string),)) self.object_stream.write(string) else: # Write a reference to the previous type logging.debug( "*** Reusing ref 0x%X for string: %s", idx + self.BASE_REFERENCE_IDX, obj, ) self.write_reference(idx) else: self._writeStruct(">H", 2, (len(string),)) self.object_stream.write(string)
python
def _writeString(self, obj, use_reference=True): """ Appends a string to the serialization stream :param obj: String to serialize :param use_reference: If True, allow writing a reference """ # TODO: Convert to "modified UTF-8" # http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8 string = to_bytes(obj, "utf-8") if use_reference and isinstance(obj, JavaString): try: idx = self.references.index(obj) except ValueError: # First appearance of the string self.references.append(obj) logging.debug( "*** Adding ref 0x%X for string: %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj, ) self._writeStruct(">H", 2, (len(string),)) self.object_stream.write(string) else: # Write a reference to the previous type logging.debug( "*** Reusing ref 0x%X for string: %s", idx + self.BASE_REFERENCE_IDX, obj, ) self.write_reference(idx) else: self._writeStruct(">H", 2, (len(string),)) self.object_stream.write(string)
[ "def", "_writeString", "(", "self", ",", "obj", ",", "use_reference", "=", "True", ")", ":", "# TODO: Convert to \"modified UTF-8\"", "# http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8", "string", "=", "to_bytes", "(", "obj", ",", "\"utf-8\"", ")", "if", "use_reference", "and", "isinstance", "(", "obj", ",", "JavaString", ")", ":", "try", ":", "idx", "=", "self", ".", "references", ".", "index", "(", "obj", ")", "except", "ValueError", ":", "# First appearance of the string", "self", ".", "references", ".", "append", "(", "obj", ")", "logging", ".", "debug", "(", "\"*** Adding ref 0x%X for string: %s\"", ",", "len", "(", "self", ".", "references", ")", "-", "1", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "obj", ",", ")", "self", ".", "_writeStruct", "(", "\">H\"", ",", "2", ",", "(", "len", "(", "string", ")", ",", ")", ")", "self", ".", "object_stream", ".", "write", "(", "string", ")", "else", ":", "# Write a reference to the previous type", "logging", ".", "debug", "(", "\"*** Reusing ref 0x%X for string: %s\"", ",", "idx", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "obj", ",", ")", "self", ".", "write_reference", "(", "idx", ")", "else", ":", "self", ".", "_writeStruct", "(", "\">H\"", ",", "2", ",", "(", "len", "(", "string", ")", ",", ")", ")", "self", ".", "object_stream", ".", "write", "(", "string", ")" ]
Appends a string to the serialization stream :param obj: String to serialize :param use_reference: If True, allow writing a reference
[ "Appends", "a", "string", "to", "the", "serialization", "stream" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1293-L1328
2,930
2930
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_string
def write_string(self, obj, use_reference=True): """ Writes a Java string with the TC_STRING type marker :param obj: The string to print :param use_reference: If True, allow writing a reference """ if use_reference and isinstance(obj, JavaString): try: idx = self.references.index(obj) except ValueError: # String is not referenced: let _writeString store it self._writeStruct(">B", 1, (self.TC_STRING,)) self._writeString(obj, use_reference) else: # Reuse the referenced string logging.debug( "*** Reusing ref 0x%X for String: %s", idx + self.BASE_REFERENCE_IDX, obj, ) self.write_reference(idx) else: # Don't use references self._writeStruct(">B", 1, (self.TC_STRING,)) self._writeString(obj, use_reference)
python
def write_string(self, obj, use_reference=True): """ Writes a Java string with the TC_STRING type marker :param obj: The string to print :param use_reference: If True, allow writing a reference """ if use_reference and isinstance(obj, JavaString): try: idx = self.references.index(obj) except ValueError: # String is not referenced: let _writeString store it self._writeStruct(">B", 1, (self.TC_STRING,)) self._writeString(obj, use_reference) else: # Reuse the referenced string logging.debug( "*** Reusing ref 0x%X for String: %s", idx + self.BASE_REFERENCE_IDX, obj, ) self.write_reference(idx) else: # Don't use references self._writeStruct(">B", 1, (self.TC_STRING,)) self._writeString(obj, use_reference)
[ "def", "write_string", "(", "self", ",", "obj", ",", "use_reference", "=", "True", ")", ":", "if", "use_reference", "and", "isinstance", "(", "obj", ",", "JavaString", ")", ":", "try", ":", "idx", "=", "self", ".", "references", ".", "index", "(", "obj", ")", "except", "ValueError", ":", "# String is not referenced: let _writeString store it", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_STRING", ",", ")", ")", "self", ".", "_writeString", "(", "obj", ",", "use_reference", ")", "else", ":", "# Reuse the referenced string", "logging", ".", "debug", "(", "\"*** Reusing ref 0x%X for String: %s\"", ",", "idx", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "obj", ",", ")", "self", ".", "write_reference", "(", "idx", ")", "else", ":", "# Don't use references", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_STRING", ",", ")", ")", "self", ".", "_writeString", "(", "obj", ",", "use_reference", ")" ]
Writes a Java string with the TC_STRING type marker :param obj: The string to print :param use_reference: If True, allow writing a reference
[ "Writes", "a", "Java", "string", "with", "the", "TC_STRING", "type", "marker" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1330-L1355
2,931
2931
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_enum
def write_enum(self, obj): """ Writes an Enum value :param obj: A JavaEnum object """ # FIXME: the output doesn't have the same references as the real # serializable form self._writeStruct(">B", 1, (self.TC_ENUM,)) try: idx = self.references.index(obj) except ValueError: # New reference self.references.append(obj) logging.debug( "*** Adding ref 0x%X for enum: %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj, ) self.write_classdesc(obj.get_class()) else: self.write_reference(idx) self.write_string(obj.constant)
python
def write_enum(self, obj): """ Writes an Enum value :param obj: A JavaEnum object """ # FIXME: the output doesn't have the same references as the real # serializable form self._writeStruct(">B", 1, (self.TC_ENUM,)) try: idx = self.references.index(obj) except ValueError: # New reference self.references.append(obj) logging.debug( "*** Adding ref 0x%X for enum: %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj, ) self.write_classdesc(obj.get_class()) else: self.write_reference(idx) self.write_string(obj.constant)
[ "def", "write_enum", "(", "self", ",", "obj", ")", ":", "# FIXME: the output doesn't have the same references as the real", "# serializable form", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_ENUM", ",", ")", ")", "try", ":", "idx", "=", "self", ".", "references", ".", "index", "(", "obj", ")", "except", "ValueError", ":", "# New reference", "self", ".", "references", ".", "append", "(", "obj", ")", "logging", ".", "debug", "(", "\"*** Adding ref 0x%X for enum: %s\"", ",", "len", "(", "self", ".", "references", ")", "-", "1", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "obj", ",", ")", "self", ".", "write_classdesc", "(", "obj", ".", "get_class", "(", ")", ")", "else", ":", "self", ".", "write_reference", "(", "idx", ")", "self", ".", "write_string", "(", "obj", ".", "constant", ")" ]
Writes an Enum value :param obj: A JavaEnum object
[ "Writes", "an", "Enum", "value" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1357-L1382
2,932
2932
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_blockdata
def write_blockdata(self, obj, parent=None): """ Appends a block of data to the serialization stream :param obj: String form of the data block """ if type(obj) is str: # Latin-1: keep bytes as is obj = to_bytes(obj, "latin-1") length = len(obj) if length <= 256: # Small block data # TC_BLOCKDATA (unsigned byte)<size> (byte)[size] self._writeStruct(">B", 1, (self.TC_BLOCKDATA,)) self._writeStruct(">B", 1, (length,)) else: # Large block data # TC_BLOCKDATALONG (unsigned int)<size> (byte)[size] self._writeStruct(">B", 1, (self.TC_BLOCKDATALONG,)) self._writeStruct(">I", 1, (length,)) self.object_stream.write(obj)
python
def write_blockdata(self, obj, parent=None): """ Appends a block of data to the serialization stream :param obj: String form of the data block """ if type(obj) is str: # Latin-1: keep bytes as is obj = to_bytes(obj, "latin-1") length = len(obj) if length <= 256: # Small block data # TC_BLOCKDATA (unsigned byte)<size> (byte)[size] self._writeStruct(">B", 1, (self.TC_BLOCKDATA,)) self._writeStruct(">B", 1, (length,)) else: # Large block data # TC_BLOCKDATALONG (unsigned int)<size> (byte)[size] self._writeStruct(">B", 1, (self.TC_BLOCKDATALONG,)) self._writeStruct(">I", 1, (length,)) self.object_stream.write(obj)
[ "def", "write_blockdata", "(", "self", ",", "obj", ",", "parent", "=", "None", ")", ":", "if", "type", "(", "obj", ")", "is", "str", ":", "# Latin-1: keep bytes as is", "obj", "=", "to_bytes", "(", "obj", ",", "\"latin-1\"", ")", "length", "=", "len", "(", "obj", ")", "if", "length", "<=", "256", ":", "# Small block data", "# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_BLOCKDATA", ",", ")", ")", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "length", ",", ")", ")", "else", ":", "# Large block data", "# TC_BLOCKDATALONG (unsigned int)<size> (byte)[size]", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_BLOCKDATALONG", ",", ")", ")", "self", ".", "_writeStruct", "(", "\">I\"", ",", "1", ",", "(", "length", ",", ")", ")", "self", ".", "object_stream", ".", "write", "(", "obj", ")" ]
Appends a block of data to the serialization stream :param obj: String form of the data block
[ "Appends", "a", "block", "of", "data", "to", "the", "serialization", "stream" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1384-L1406
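The two encodings map directly onto the serialization grammar: TC_BLOCKDATA (0x77) carries a one-byte length, TC_BLOCKDATALONG (0x7A) a four-byte one. Note that struct's ">B" only accepts 0..255, so a block of exactly 256 bytes needs the long form; the record's `length <= 256` test sits right on that edge. A stand-alone sketch using 255 as the cut-off:

import struct

# standard Java serialization opcodes
TC_BLOCKDATA, TC_BLOCKDATALONG = 0x77, 0x7A

def block_header(length: int) -> bytes:
    if length < 256:
        # TC_BLOCKDATA (unsigned byte)<size>
        return struct.pack(">BB", TC_BLOCKDATA, length)
    # TC_BLOCKDATALONG (unsigned int)<size>
    return struct.pack(">BI", TC_BLOCKDATALONG, length)

assert block_header(5) == b"\x77\x05"
assert block_header(300) == b"\x7a\x00\x00\x01\x2c"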
2,933
2933
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_object
def write_object(self, obj, parent=None): """ Writes an object header to the serialization stream :param obj: Not yet used :param parent: Not yet used """ # Transform object for transformer in self.object_transformers: tmp_object = transformer.transform(obj) if tmp_object is not obj: obj = tmp_object break self._writeStruct(">B", 1, (self.TC_OBJECT,)) cls = obj.get_class() self.write_classdesc(cls) # Add reference self.references.append([]) logging.debug( "*** Adding ref 0x%X for object %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj, ) all_names = collections.deque() all_types = collections.deque() tmpcls = cls while tmpcls: all_names.extendleft(reversed(tmpcls.fields_names)) all_types.extendleft(reversed(tmpcls.fields_types)) tmpcls = tmpcls.superclass del tmpcls logging.debug("<=> Field names: %s", all_names) logging.debug("<=> Field types: %s", all_types) for field_name, field_type in zip(all_names, all_types): try: logging.debug( "Writing field %s (%s): %s", field_name, field_type, getattr(obj, field_name), ) self._write_value(field_type, getattr(obj, field_name)) except AttributeError as ex: log_error( "No attribute {0} for object {1}\nDir: {2}".format( ex, repr(obj), dir(obj) ) ) raise del all_names, all_types if ( cls.flags & self.SC_SERIALIZABLE and cls.flags & self.SC_WRITE_METHOD or cls.flags & self.SC_EXTERNALIZABLE and cls.flags & self.SC_BLOCK_DATA ): for annotation in obj.annotations: log_debug( "Write annotation {0} for {1}".format(repr(annotation), repr(obj)) ) if annotation is None: self.write_null() else: self.writeObject(annotation) self._writeStruct(">B", 1, (self.TC_ENDBLOCKDATA,))
python
def write_object(self, obj, parent=None): """ Writes an object header to the serialization stream :param obj: Not yet used :param parent: Not yet used """ # Transform object for transformer in self.object_transformers: tmp_object = transformer.transform(obj) if tmp_object is not obj: obj = tmp_object break self._writeStruct(">B", 1, (self.TC_OBJECT,)) cls = obj.get_class() self.write_classdesc(cls) # Add reference self.references.append([]) logging.debug( "*** Adding ref 0x%X for object %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj, ) all_names = collections.deque() all_types = collections.deque() tmpcls = cls while tmpcls: all_names.extendleft(reversed(tmpcls.fields_names)) all_types.extendleft(reversed(tmpcls.fields_types)) tmpcls = tmpcls.superclass del tmpcls logging.debug("<=> Field names: %s", all_names) logging.debug("<=> Field types: %s", all_types) for field_name, field_type in zip(all_names, all_types): try: logging.debug( "Writing field %s (%s): %s", field_name, field_type, getattr(obj, field_name), ) self._write_value(field_type, getattr(obj, field_name)) except AttributeError as ex: log_error( "No attribute {0} for object {1}\nDir: {2}".format( ex, repr(obj), dir(obj) ) ) raise del all_names, all_types if ( cls.flags & self.SC_SERIALIZABLE and cls.flags & self.SC_WRITE_METHOD or cls.flags & self.SC_EXTERNALIZABLE and cls.flags & self.SC_BLOCK_DATA ): for annotation in obj.annotations: log_debug( "Write annotation {0} for {1}".format(repr(annotation), repr(obj)) ) if annotation is None: self.write_null() else: self.writeObject(annotation) self._writeStruct(">B", 1, (self.TC_ENDBLOCKDATA,))
[ "def", "write_object", "(", "self", ",", "obj", ",", "parent", "=", "None", ")", ":", "# Transform object", "for", "transformer", "in", "self", ".", "object_transformers", ":", "tmp_object", "=", "transformer", ".", "transform", "(", "obj", ")", "if", "tmp_object", "is", "not", "obj", ":", "obj", "=", "tmp_object", "break", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_OBJECT", ",", ")", ")", "cls", "=", "obj", ".", "get_class", "(", ")", "self", ".", "write_classdesc", "(", "cls", ")", "# Add reference", "self", ".", "references", ".", "append", "(", "[", "]", ")", "logging", ".", "debug", "(", "\"*** Adding ref 0x%X for object %s\"", ",", "len", "(", "self", ".", "references", ")", "-", "1", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "obj", ",", ")", "all_names", "=", "collections", ".", "deque", "(", ")", "all_types", "=", "collections", ".", "deque", "(", ")", "tmpcls", "=", "cls", "while", "tmpcls", ":", "all_names", ".", "extendleft", "(", "reversed", "(", "tmpcls", ".", "fields_names", ")", ")", "all_types", ".", "extendleft", "(", "reversed", "(", "tmpcls", ".", "fields_types", ")", ")", "tmpcls", "=", "tmpcls", ".", "superclass", "del", "tmpcls", "logging", ".", "debug", "(", "\"<=> Field names: %s\"", ",", "all_names", ")", "logging", ".", "debug", "(", "\"<=> Field types: %s\"", ",", "all_types", ")", "for", "field_name", ",", "field_type", "in", "zip", "(", "all_names", ",", "all_types", ")", ":", "try", ":", "logging", ".", "debug", "(", "\"Writing field %s (%s): %s\"", ",", "field_name", ",", "field_type", ",", "getattr", "(", "obj", ",", "field_name", ")", ",", ")", "self", ".", "_write_value", "(", "field_type", ",", "getattr", "(", "obj", ",", "field_name", ")", ")", "except", "AttributeError", "as", "ex", ":", "log_error", "(", "\"No attribute {0} for object {1}\\nDir: {2}\"", ".", "format", "(", "ex", ",", "repr", "(", "obj", ")", ",", "dir", "(", "obj", ")", ")", ")", "raise", "del", "all_names", ",", "all_types", "if", "(", "cls", ".", "flags", "&", "self", ".", "SC_SERIALIZABLE", "and", "cls", ".", "flags", "&", "self", ".", "SC_WRITE_METHOD", "or", "cls", ".", "flags", "&", "self", ".", "SC_EXTERNALIZABLE", "and", "cls", ".", "flags", "&", "self", ".", "SC_BLOCK_DATA", ")", ":", "for", "annotation", "in", "obj", ".", "annotations", ":", "log_debug", "(", "\"Write annotation {0} for {1}\"", ".", "format", "(", "repr", "(", "annotation", ")", ",", "repr", "(", "obj", ")", ")", ")", "if", "annotation", "is", "None", ":", "self", ".", "write_null", "(", ")", "else", ":", "self", ".", "writeObject", "(", "annotation", ")", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_ENDBLOCKDATA", ",", ")", ")" ]
Writes an object header to the serialization stream :param obj: Not yet used :param parent: Not yet used
[ "Writes", "an", "object", "header", "to", "the", "serialization", "stream" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1414-L1484
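Field values are written in superclass-first order; the deque/extendleft walk above is what produces that ordering. A stripped-down illustration with stand-in classes (names are hypothetical, only the ordering logic mirrors the record):

import collections

class Cls:
    def __init__(self, name, fields, superclass=None):
        self.name, self.fields_names, self.superclass = name, fields, superclass

base = Cls("Base", ["a"])
child = Cls("Child", ["b", "c"], superclass=base)

all_names = collections.deque()
tmp = child
while tmp:
    # prepend each class's fields so ancestors end up first
    all_names.extendleft(reversed(tmp.fields_names))
    tmp = tmp.superclass

assert list(all_names) == ["a", "b", "c"]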
2,934
2934
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_class
def write_class(self, obj, parent=None): """ Writes a class to the stream :param obj: A JavaClass object :param parent: """ self._writeStruct(">B", 1, (self.TC_CLASS,)) self.write_classdesc(obj)
python
def write_class(self, obj, parent=None): """ Writes a class to the stream :param obj: A JavaClass object :param parent: """ self._writeStruct(">B", 1, (self.TC_CLASS,)) self.write_classdesc(obj)
[ "def", "write_class", "(", "self", ",", "obj", ",", "parent", "=", "None", ")", ":", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_CLASS", ",", ")", ")", "self", ".", "write_classdesc", "(", "obj", ")" ]
Writes a class to the stream :param obj: A JavaClass object :param parent:
[ "Writes", "a", "class", "to", "the", "stream" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1486-L1494
2,935
2935
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_classdesc
def write_classdesc(self, obj, parent=None): """ Writes a class description :param obj: Class description to write :param parent: """ if obj not in self.references: # Add reference self.references.append(obj) logging.debug( "*** Adding ref 0x%X for classdesc %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj.name, ) self._writeStruct(">B", 1, (self.TC_CLASSDESC,)) self._writeString(obj.name) self._writeStruct(">qB", 1, (obj.serialVersionUID, obj.flags)) self._writeStruct(">H", 1, (len(obj.fields_names),)) for field_name, field_type in zip(obj.fields_names, obj.fields_types): self._writeStruct(">B", 1, (self._convert_type_to_char(field_type),)) self._writeString(field_name) if field_type[0] in (self.TYPE_OBJECT, self.TYPE_ARRAY): try: idx = self.references.index(field_type) except ValueError: # First appearance of the type self.references.append(field_type) logging.debug( "*** Adding ref 0x%X for field type %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, field_type, ) self.write_string(field_type, False) else: # Write a reference to the previous type logging.debug( "*** Reusing ref 0x%X for %s (%s)", idx + self.BASE_REFERENCE_IDX, field_type, field_name, ) self.write_reference(idx) self._writeStruct(">B", 1, (self.TC_ENDBLOCKDATA,)) if obj.superclass: self.write_classdesc(obj.superclass) else: self.write_null() else: # Use reference self.write_reference(self.references.index(obj))
python
def write_classdesc(self, obj, parent=None): """ Writes a class description :param obj: Class description to write :param parent: """ if obj not in self.references: # Add reference self.references.append(obj) logging.debug( "*** Adding ref 0x%X for classdesc %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj.name, ) self._writeStruct(">B", 1, (self.TC_CLASSDESC,)) self._writeString(obj.name) self._writeStruct(">qB", 1, (obj.serialVersionUID, obj.flags)) self._writeStruct(">H", 1, (len(obj.fields_names),)) for field_name, field_type in zip(obj.fields_names, obj.fields_types): self._writeStruct(">B", 1, (self._convert_type_to_char(field_type),)) self._writeString(field_name) if field_type[0] in (self.TYPE_OBJECT, self.TYPE_ARRAY): try: idx = self.references.index(field_type) except ValueError: # First appearance of the type self.references.append(field_type) logging.debug( "*** Adding ref 0x%X for field type %s", len(self.references) - 1 + self.BASE_REFERENCE_IDX, field_type, ) self.write_string(field_type, False) else: # Write a reference to the previous type logging.debug( "*** Reusing ref 0x%X for %s (%s)", idx + self.BASE_REFERENCE_IDX, field_type, field_name, ) self.write_reference(idx) self._writeStruct(">B", 1, (self.TC_ENDBLOCKDATA,)) if obj.superclass: self.write_classdesc(obj.superclass) else: self.write_null() else: # Use reference self.write_reference(self.references.index(obj))
[ "def", "write_classdesc", "(", "self", ",", "obj", ",", "parent", "=", "None", ")", ":", "if", "obj", "not", "in", "self", ".", "references", ":", "# Add reference", "self", ".", "references", ".", "append", "(", "obj", ")", "logging", ".", "debug", "(", "\"*** Adding ref 0x%X for classdesc %s\"", ",", "len", "(", "self", ".", "references", ")", "-", "1", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "obj", ".", "name", ",", ")", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_CLASSDESC", ",", ")", ")", "self", ".", "_writeString", "(", "obj", ".", "name", ")", "self", ".", "_writeStruct", "(", "\">qB\"", ",", "1", ",", "(", "obj", ".", "serialVersionUID", ",", "obj", ".", "flags", ")", ")", "self", ".", "_writeStruct", "(", "\">H\"", ",", "1", ",", "(", "len", "(", "obj", ".", "fields_names", ")", ",", ")", ")", "for", "field_name", ",", "field_type", "in", "zip", "(", "obj", ".", "fields_names", ",", "obj", ".", "fields_types", ")", ":", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "_convert_type_to_char", "(", "field_type", ")", ",", ")", ")", "self", ".", "_writeString", "(", "field_name", ")", "if", "field_type", "[", "0", "]", "in", "(", "self", ".", "TYPE_OBJECT", ",", "self", ".", "TYPE_ARRAY", ")", ":", "try", ":", "idx", "=", "self", ".", "references", ".", "index", "(", "field_type", ")", "except", "ValueError", ":", "# First appearance of the type", "self", ".", "references", ".", "append", "(", "field_type", ")", "logging", ".", "debug", "(", "\"*** Adding ref 0x%X for field type %s\"", ",", "len", "(", "self", ".", "references", ")", "-", "1", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "field_type", ",", ")", "self", ".", "write_string", "(", "field_type", ",", "False", ")", "else", ":", "# Write a reference to the previous type", "logging", ".", "debug", "(", "\"*** Reusing ref 0x%X for %s (%s)\"", ",", "idx", "+", "self", ".", "BASE_REFERENCE_IDX", ",", "field_type", ",", "field_name", ",", ")", "self", ".", "write_reference", "(", "idx", ")", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_ENDBLOCKDATA", ",", ")", ")", "if", "obj", ".", "superclass", ":", "self", ".", "write_classdesc", "(", "obj", ".", "superclass", ")", "else", ":", "self", ".", "write_null", "(", ")", "else", ":", "# Use reference", "self", ".", "write_reference", "(", "self", ".", "references", ".", "index", "(", "obj", ")", ")" ]
Writes a class description :param obj: Class description to write :param parent:
[ "Writes", "a", "class", "description" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1496-L1550
2,936
2936
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller.write_array
def write_array(self, obj): """ Writes a JavaArray :param obj: A JavaArray object """ classdesc = obj.get_class() self._writeStruct(">B", 1, (self.TC_ARRAY,)) self.write_classdesc(classdesc) self._writeStruct(">i", 1, (len(obj),)) # Add reference self.references.append(obj) logging.debug( "*** Adding ref 0x%X for array []", len(self.references) - 1 + self.BASE_REFERENCE_IDX, ) type_char = classdesc.name[0] assert type_char == self.TYPE_ARRAY type_char = classdesc.name[1] if type_char == self.TYPE_OBJECT: for o in obj: self._write_value(classdesc.name[1:], o) elif type_char == self.TYPE_ARRAY: for a in obj: self.write_array(a) else: log_debug("Write array of type %s" % type_char) for v in obj: log_debug("Writing: %s" % v) self._write_value(type_char, v)
python
def write_array(self, obj): """ Writes a JavaArray :param obj: A JavaArray object """ classdesc = obj.get_class() self._writeStruct(">B", 1, (self.TC_ARRAY,)) self.write_classdesc(classdesc) self._writeStruct(">i", 1, (len(obj),)) # Add reference self.references.append(obj) logging.debug( "*** Adding ref 0x%X for array []", len(self.references) - 1 + self.BASE_REFERENCE_IDX, ) type_char = classdesc.name[0] assert type_char == self.TYPE_ARRAY type_char = classdesc.name[1] if type_char == self.TYPE_OBJECT: for o in obj: self._write_value(classdesc.name[1:], o) elif type_char == self.TYPE_ARRAY: for a in obj: self.write_array(a) else: log_debug("Write array of type %s" % type_char) for v in obj: log_debug("Writing: %s" % v) self._write_value(type_char, v)
[ "def", "write_array", "(", "self", ",", "obj", ")", ":", "classdesc", "=", "obj", ".", "get_class", "(", ")", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "self", ".", "TC_ARRAY", ",", ")", ")", "self", ".", "write_classdesc", "(", "classdesc", ")", "self", ".", "_writeStruct", "(", "\">i\"", ",", "1", ",", "(", "len", "(", "obj", ")", ",", ")", ")", "# Add reference", "self", ".", "references", ".", "append", "(", "obj", ")", "logging", ".", "debug", "(", "\"*** Adding ref 0x%X for array []\"", ",", "len", "(", "self", ".", "references", ")", "-", "1", "+", "self", ".", "BASE_REFERENCE_IDX", ",", ")", "type_char", "=", "classdesc", ".", "name", "[", "0", "]", "assert", "type_char", "==", "self", ".", "TYPE_ARRAY", "type_char", "=", "classdesc", ".", "name", "[", "1", "]", "if", "type_char", "==", "self", ".", "TYPE_OBJECT", ":", "for", "o", "in", "obj", ":", "self", ".", "_write_value", "(", "classdesc", ".", "name", "[", "1", ":", "]", ",", "o", ")", "elif", "type_char", "==", "self", ".", "TYPE_ARRAY", ":", "for", "a", "in", "obj", ":", "self", ".", "write_array", "(", "a", ")", "else", ":", "log_debug", "(", "\"Write array of type %s\"", "%", "type_char", ")", "for", "v", "in", "obj", ":", "log_debug", "(", "\"Writing: %s\"", "%", "v", ")", "self", ".", "_write_value", "(", "type_char", ",", "v", ")" ]
Writes a JavaArray :param obj: A JavaArray object
[ "Writes", "a", "JavaArray" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1561-L1593
2,937
2937
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller._write_value
def _write_value(self, field_type, value): """ Writes an item of an array :param field_type: Value type :param value: The value itself """ if len(field_type) > 1: # We don't need details for arrays and objects field_type = field_type[0] if field_type == self.TYPE_BOOLEAN: self._writeStruct(">B", 1, (1 if value else 0,)) elif field_type == self.TYPE_BYTE: self._writeStruct(">b", 1, (value,)) elif field_type == self.TYPE_CHAR: self._writeStruct(">H", 1, (ord(value),)) elif field_type == self.TYPE_SHORT: self._writeStruct(">h", 1, (value,)) elif field_type == self.TYPE_INTEGER: self._writeStruct(">i", 1, (value,)) elif field_type == self.TYPE_LONG: self._writeStruct(">q", 1, (value,)) elif field_type == self.TYPE_FLOAT: self._writeStruct(">f", 1, (value,)) elif field_type == self.TYPE_DOUBLE: self._writeStruct(">d", 1, (value,)) elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY: if value is None: self.write_null() elif isinstance(value, JavaEnum): self.write_enum(value) elif isinstance(value, (JavaArray, JavaByteArray)): self.write_array(value) elif isinstance(value, JavaObject): self.write_object(value) elif isinstance(value, JavaString): self.write_string(value) elif isinstance(value, str): self.write_blockdata(value) else: raise RuntimeError("Unknown typecode: {0}".format(field_type)) else: raise RuntimeError("Unknown typecode: {0}".format(field_type))
python
def _write_value(self, field_type, value): """ Writes an item of an array :param field_type: Value type :param value: The value itself """ if len(field_type) > 1: # We don't need details for arrays and objects field_type = field_type[0] if field_type == self.TYPE_BOOLEAN: self._writeStruct(">B", 1, (1 if value else 0,)) elif field_type == self.TYPE_BYTE: self._writeStruct(">b", 1, (value,)) elif field_type == self.TYPE_CHAR: self._writeStruct(">H", 1, (ord(value),)) elif field_type == self.TYPE_SHORT: self._writeStruct(">h", 1, (value,)) elif field_type == self.TYPE_INTEGER: self._writeStruct(">i", 1, (value,)) elif field_type == self.TYPE_LONG: self._writeStruct(">q", 1, (value,)) elif field_type == self.TYPE_FLOAT: self._writeStruct(">f", 1, (value,)) elif field_type == self.TYPE_DOUBLE: self._writeStruct(">d", 1, (value,)) elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY: if value is None: self.write_null() elif isinstance(value, JavaEnum): self.write_enum(value) elif isinstance(value, (JavaArray, JavaByteArray)): self.write_array(value) elif isinstance(value, JavaObject): self.write_object(value) elif isinstance(value, JavaString): self.write_string(value) elif isinstance(value, str): self.write_blockdata(value) else: raise RuntimeError("Unknown typecode: {0}".format(field_type)) else: raise RuntimeError("Unknown typecode: {0}".format(field_type))
[ "def", "_write_value", "(", "self", ",", "field_type", ",", "value", ")", ":", "if", "len", "(", "field_type", ")", ">", "1", ":", "# We don't need details for arrays and objects", "field_type", "=", "field_type", "[", "0", "]", "if", "field_type", "==", "self", ".", "TYPE_BOOLEAN", ":", "self", ".", "_writeStruct", "(", "\">B\"", ",", "1", ",", "(", "1", "if", "value", "else", "0", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_BYTE", ":", "self", ".", "_writeStruct", "(", "\">b\"", ",", "1", ",", "(", "value", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_CHAR", ":", "self", ".", "_writeStruct", "(", "\">H\"", ",", "1", ",", "(", "ord", "(", "value", ")", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_SHORT", ":", "self", ".", "_writeStruct", "(", "\">h\"", ",", "1", ",", "(", "value", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_INTEGER", ":", "self", ".", "_writeStruct", "(", "\">i\"", ",", "1", ",", "(", "value", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_LONG", ":", "self", ".", "_writeStruct", "(", "\">q\"", ",", "1", ",", "(", "value", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_FLOAT", ":", "self", ".", "_writeStruct", "(", "\">f\"", ",", "1", ",", "(", "value", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_DOUBLE", ":", "self", ".", "_writeStruct", "(", "\">d\"", ",", "1", ",", "(", "value", ",", ")", ")", "elif", "field_type", "==", "self", ".", "TYPE_OBJECT", "or", "field_type", "==", "self", ".", "TYPE_ARRAY", ":", "if", "value", "is", "None", ":", "self", ".", "write_null", "(", ")", "elif", "isinstance", "(", "value", ",", "JavaEnum", ")", ":", "self", ".", "write_enum", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "(", "JavaArray", ",", "JavaByteArray", ")", ")", ":", "self", ".", "write_array", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "JavaObject", ")", ":", "self", ".", "write_object", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "JavaString", ")", ":", "self", ".", "write_string", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "self", ".", "write_blockdata", "(", "value", ")", "else", ":", "raise", "RuntimeError", "(", "\"Unknown typecode: {0}\"", ".", "format", "(", "field_type", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Unknown typecode: {0}\"", ".", "format", "(", "field_type", ")", ")" ]
Writes an item of an array :param field_type: Value type :param value: The value itself
[ "Writes", "an", "item", "of", "an", "array" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1595-L1638
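Condensed, the primitive branches reduce to a typecode-to-struct-format table. The single-letter keys below are the standard JVM field descriptors, which the record's TYPE_* constants are assumed to resolve to; the formats are copied from the branches above.

import struct

PRIMITIVE_FORMATS = {
    "Z": ">B",  # boolean, written as 0/1
    "B": ">b",  # byte
    "C": ">H",  # char, written as ord(value)
    "S": ">h",  # short
    "I": ">i",  # int
    "J": ">q",  # long
    "F": ">f",  # float
    "D": ">d",  # double
}

def pack_primitive(typecode: str, value) -> bytes:
    if typecode == "C":
        value = ord(value)
    elif typecode == "Z":
        value = 1 if value else 0
    return struct.pack(PRIMITIVE_FORMATS[typecode], value)

assert pack_primitive("I", 42) == b"\x00\x00\x00\x2a"
assert pack_primitive("Z", True) == b"\x01"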
2,938
2938
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller._convert_type_to_char
def _convert_type_to_char(self, type_char): """ Converts the given type code to an int :param type_char: A type code character """ typecode = type_char if type(type_char) is int: typecode = chr(type_char) if typecode in self.TYPECODES_LIST: return ord(typecode) elif len(typecode) > 1: if typecode[0] == "L": return ord(self.TYPE_OBJECT) elif typecode[0] == "[": return ord(self.TYPE_ARRAY) raise RuntimeError( "Typecode {0} ({1}) isn't supported.".format(type_char, typecode) )
python
def _convert_type_to_char(self, type_char): """ Converts the given type code to an int :param type_char: A type code character """ typecode = type_char if type(type_char) is int: typecode = chr(type_char) if typecode in self.TYPECODES_LIST: return ord(typecode) elif len(typecode) > 1: if typecode[0] == "L": return ord(self.TYPE_OBJECT) elif typecode[0] == "[": return ord(self.TYPE_ARRAY) raise RuntimeError( "Typecode {0} ({1}) isn't supported.".format(type_char, typecode) )
[ "def", "_convert_type_to_char", "(", "self", ",", "type_char", ")", ":", "typecode", "=", "type_char", "if", "type", "(", "type_char", ")", "is", "int", ":", "typecode", "=", "chr", "(", "type_char", ")", "if", "typecode", "in", "self", ".", "TYPECODES_LIST", ":", "return", "ord", "(", "typecode", ")", "elif", "len", "(", "typecode", ")", ">", "1", ":", "if", "typecode", "[", "0", "]", "==", "\"L\"", ":", "return", "ord", "(", "self", ".", "TYPE_OBJECT", ")", "elif", "typecode", "[", "0", "]", "==", "\"[\"", ":", "return", "ord", "(", "self", ".", "TYPE_ARRAY", ")", "raise", "RuntimeError", "(", "\"Typecode {0} ({1}) isn't supported.\"", ".", "format", "(", "type_char", ",", "typecode", ")", ")" ]
Converts the given type code to an int :param type_char: A type code character
[ "Converts", "the", "given", "type", "code", "to", "an", "int" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1640-L1660
2,939
2939
tcalmant/python-javaobj
javaobj/core.py
DefaultObjectTransformer.create
def create(self, classdesc, unmarshaller=None): # type: (JavaClass, JavaObjectUnmarshaller) -> JavaObject """ Transforms a deserialized Java object into a Python object :param classdesc: The description of a Java class :return: The Python form of the object, or the original JavaObject """ try: mapped_type = self.TYPE_MAPPER[classdesc.name] except KeyError: # Return a JavaObject by default return JavaObject() else: log_debug("---") log_debug(classdesc.name) log_debug("---") java_object = mapped_type(unmarshaller) log_debug(">>> java_object: {0}".format(java_object)) return java_object
python
def create(self, classdesc, unmarshaller=None): # type: (JavaClass, JavaObjectUnmarshaller) -> JavaObject """ Transforms a deserialized Java object into a Python object :param classdesc: The description of a Java class :return: The Python form of the object, or the original JavaObject """ try: mapped_type = self.TYPE_MAPPER[classdesc.name] except KeyError: # Return a JavaObject by default return JavaObject() else: log_debug("---") log_debug(classdesc.name) log_debug("---") java_object = mapped_type(unmarshaller) log_debug(">>> java_object: {0}".format(java_object)) return java_object
[ "def", "create", "(", "self", ",", "classdesc", ",", "unmarshaller", "=", "None", ")", ":", "# type: (JavaClass, JavaObjectUnmarshaller) -> JavaObject", "try", ":", "mapped_type", "=", "self", ".", "TYPE_MAPPER", "[", "classdesc", ".", "name", "]", "except", "KeyError", ":", "# Return a JavaObject by default", "return", "JavaObject", "(", ")", "else", ":", "log_debug", "(", "\"---\"", ")", "log_debug", "(", "classdesc", ".", "name", ")", "log_debug", "(", "\"---\"", ")", "java_object", "=", "mapped_type", "(", "unmarshaller", ")", "log_debug", "(", "\">>> java_object: {0}\"", ".", "format", "(", "java_object", ")", ")", "return", "java_object" ]
Transforms a deserialized Java object into a Python object :param classdesc: The description of a Java class :return: The Python form of the object, or the original JavaObject
[ "Transforms", "a", "deserialized", "Java", "object", "into", "a", "Python", "object" ]
e042c2cbf1ce9de659b6cb9290b5ccd5442514d1
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L2002-L2023
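The create method above is a lookup-with-fallback: map the Java class name to a Python-side type, otherwise return a plain JavaObject. Here is a minimal self-contained sketch of that pattern; JavaObject/JavaList are stand-in classes and the mapping contents are assumed for illustration rather than taken from the library's actual TYPE_MAPPER.

class JavaObject(object):                    # stand-in for javaobj's JavaObject
    pass

class JavaList(list):                        # stand-in for a mapped container type
    def __init__(self, unmarshaller=None):
        super(JavaList, self).__init__()

TYPE_MAPPER = {"java.util.ArrayList": JavaList}   # assumed example mapping

def create(classdesc_name, unmarshaller=None):
    try:
        mapped_type = TYPE_MAPPER[classdesc_name]
    except KeyError:
        return JavaObject()                  # default when no specific mapping exists
    return mapped_type(unmarshaller)

print(type(create("java.util.ArrayList")).__name__)   # JavaList
print(type(create("com.example.Unknown")).__name__)   # JavaObject
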
2,940
UDST/urbansim
urbansim/urbanchoice/mnl.py
mnl_simulate
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True): """ Get the probabilities for each chooser choosing between `numalts` alternatives. Parameters ---------- data : 2D array The data are expected to be in "long" form where each row is for one alternative. Alternatives are in groups of `numalts` rows per choosers. Alternatives must be in the same order for each chooser. coeff : 1D array The model coefficients corresponding to each column in `data`. numalts : int The number of alternatives available to each chooser. GPU : bool, optional returnprobs : bool, optional If True, return the probabilities for each chooser/alternative instead of actual choices. Returns ------- probs or choices: 2D array If `returnprobs` is True the probabilities are a 2D array with a row for each chooser and columns for each alternative. """ logger.debug( 'start: MNL simulation with len(data)={} and numalts={}'.format( len(data), numalts)) atype = 'numpy' if not GPU else 'cuda' data = np.transpose(data) coeff = np.reshape(np.array(coeff), (1, len(coeff))) data, coeff = PMAT(data, atype), PMAT(coeff, atype) probs = mnl_probs(data, coeff, numalts) if returnprobs: return np.transpose(probs.get_mat()) # convert to cpu from here on - gpu doesn't currently support these ops if probs.typ == 'cuda': probs = PMAT(probs.get_mat()) probs = probs.cumsum(axis=0) r = pmat.random(probs.size() // numalts) choices = probs.subtract(r, inplace=True).firstpositive(axis=0) logger.debug('finish: MNL simulation') return choices.get_mat()
python
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True): """ Get the probabilities for each chooser choosing between `numalts` alternatives. Parameters ---------- data : 2D array The data are expected to be in "long" form where each row is for one alternative. Alternatives are in groups of `numalts` rows per choosers. Alternatives must be in the same order for each chooser. coeff : 1D array The model coefficients corresponding to each column in `data`. numalts : int The number of alternatives available to each chooser. GPU : bool, optional returnprobs : bool, optional If True, return the probabilities for each chooser/alternative instead of actual choices. Returns ------- probs or choices: 2D array If `returnprobs` is True the probabilities are a 2D array with a row for each chooser and columns for each alternative. """ logger.debug( 'start: MNL simulation with len(data)={} and numalts={}'.format( len(data), numalts)) atype = 'numpy' if not GPU else 'cuda' data = np.transpose(data) coeff = np.reshape(np.array(coeff), (1, len(coeff))) data, coeff = PMAT(data, atype), PMAT(coeff, atype) probs = mnl_probs(data, coeff, numalts) if returnprobs: return np.transpose(probs.get_mat()) # convert to cpu from here on - gpu doesn't currently support these ops if probs.typ == 'cuda': probs = PMAT(probs.get_mat()) probs = probs.cumsum(axis=0) r = pmat.random(probs.size() // numalts) choices = probs.subtract(r, inplace=True).firstpositive(axis=0) logger.debug('finish: MNL simulation') return choices.get_mat()
[ "def", "mnl_simulate", "(", "data", ",", "coeff", ",", "numalts", ",", "GPU", "=", "False", ",", "returnprobs", "=", "True", ")", ":", "logger", ".", "debug", "(", "'start: MNL simulation with len(data)={} and numalts={}'", ".", "format", "(", "len", "(", "data", ")", ",", "numalts", ")", ")", "atype", "=", "'numpy'", "if", "not", "GPU", "else", "'cuda'", "data", "=", "np", ".", "transpose", "(", "data", ")", "coeff", "=", "np", ".", "reshape", "(", "np", ".", "array", "(", "coeff", ")", ",", "(", "1", ",", "len", "(", "coeff", ")", ")", ")", "data", ",", "coeff", "=", "PMAT", "(", "data", ",", "atype", ")", ",", "PMAT", "(", "coeff", ",", "atype", ")", "probs", "=", "mnl_probs", "(", "data", ",", "coeff", ",", "numalts", ")", "if", "returnprobs", ":", "return", "np", ".", "transpose", "(", "probs", ".", "get_mat", "(", ")", ")", "# convert to cpu from here on - gpu doesn't currently support these ops", "if", "probs", ".", "typ", "==", "'cuda'", ":", "probs", "=", "PMAT", "(", "probs", ".", "get_mat", "(", ")", ")", "probs", "=", "probs", ".", "cumsum", "(", "axis", "=", "0", ")", "r", "=", "pmat", ".", "random", "(", "probs", ".", "size", "(", ")", "//", "numalts", ")", "choices", "=", "probs", ".", "subtract", "(", "r", ",", "inplace", "=", "True", ")", ".", "firstpositive", "(", "axis", "=", "0", ")", "logger", ".", "debug", "(", "'finish: MNL simulation'", ")", "return", "choices", ".", "get_mat", "(", ")" ]
Get the probabilities for each chooser choosing between `numalts` alternatives. Parameters ---------- data : 2D array The data are expected to be in "long" form where each row is for one alternative. Alternatives are in groups of `numalts` rows per choosers. Alternatives must be in the same order for each chooser. coeff : 1D array The model coefficients corresponding to each column in `data`. numalts : int The number of alternatives available to each chooser. GPU : bool, optional returnprobs : bool, optional If True, return the probabilities for each chooser/alternative instead of actual choices. Returns ------- probs or choices: 2D array If `returnprobs` is True the probabilities are a 2D array with a row for each chooser and columns for each alternative.
[ "Get", "the", "probabilities", "for", "each", "chooser", "choosing", "between", "numalts", "alternatives", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/urbanchoice/mnl.py#L124-L175
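A hedged usage sketch for the mnl_simulate entry above, using synthetic data: 3 choosers with 4 alternatives each gives 12 rows of "long" form data, and the two coefficient values are made up. This assumes urbansim is installed and importable as urbansim.urbanchoice.mnl.

import numpy as np
from urbansim.urbanchoice import mnl   # assumes urbansim is installed

numalts, numobs = 4, 3
data = np.random.randn(numobs * numalts, 2)   # long form: one row per chooser/alternative pair
coeff = np.array([0.5, -1.0])                 # one made-up coefficient per data column

probs = mnl.mnl_simulate(data, coeff, numalts, returnprobs=True)
print(probs.shape)          # (3, 4): a row per chooser, a column per alternative
print(probs.sum(axis=1))    # each chooser's probabilities should sum to ~1
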
2,941
UDST/urbansim
urbansim/urbanchoice/mnl.py
mnl_estimate
def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3), weights=None, lcgrad=False, beta=None): """ Calculate coefficients of the MNL model. Parameters ---------- data : 2D array The data are expected to be in "long" form where each row is for one alternative. Alternatives are in groups of `numalts` rows per choosers. Alternatives must be in the same order for each chooser. chosen : 2D array This boolean array has a row for each chooser and a column for each alternative. The column ordering for alternatives is expected to be the same as their row ordering in the `data` array. A one (True) indicates which alternative each chooser has chosen. numalts : int The number of alternatives. GPU : bool, optional coeffrange : tuple of floats, optional Limits of (min, max) to which coefficients are clipped. weights : ndarray, optional lcgrad : bool, optional beta : 1D array, optional Any initial guess for the coefficients. Returns ------- log_likelihood : dict Dictionary of log-likelihood values describing the quality of the model fit. fit_parameters : pandas.DataFrame Table of fit parameters with columns 'Coefficient', 'Std. Error', 'T-Score'. Each row corresponds to a column in `data` and are given in the same order as in `data`. See Also -------- scipy.optimize.fmin_l_bfgs_b : The optimization routine used. """ logger.debug( 'start: MNL fit with len(data)={} and numalts={}'.format( len(data), numalts)) atype = 'numpy' if not GPU else 'cuda' numvars = data.shape[1] numobs = data.shape[0] // numalts if chosen is None: chosen = np.ones((numobs, numalts)) # used for latent classes data = np.transpose(data) chosen = np.transpose(chosen) data, chosen = PMAT(data, atype), PMAT(chosen, atype) if weights is not None: weights = PMAT(np.transpose(weights), atype) if beta is None: beta = np.zeros(numvars) bounds = [coeffrange] * numvars with log_start_finish('scipy optimization for MNL fit', logger): args = (data, chosen, numalts, weights, lcgrad) bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik, beta, args=args, fprime=None, factr=10, approx_grad=False, bounds=bounds ) if bfgs_result[2]['warnflag'] > 0: logger.warn("mnl did not converge correctly: %s", bfgs_result) beta = bfgs_result[0] stderr = mnl_loglik( beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad) l0beta = np.zeros(numvars) l0 = -1 * mnl_loglik(l0beta, *args)[0] l1 = -1 * mnl_loglik(beta, *args)[0] log_likelihood = { 'null': float(l0[0][0]), 'convergence': float(l1[0][0]), 'ratio': float((1 - (l1 / l0))[0][0]) } fit_parameters = pd.DataFrame({ 'Coefficient': beta, 'Std. Error': stderr, 'T-Score': beta / stderr}) logger.debug('finish: MNL fit') return log_likelihood, fit_parameters
python
def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3), weights=None, lcgrad=False, beta=None): """ Calculate coefficients of the MNL model. Parameters ---------- data : 2D array The data are expected to be in "long" form where each row is for one alternative. Alternatives are in groups of `numalts` rows per choosers. Alternatives must be in the same order for each chooser. chosen : 2D array This boolean array has a row for each chooser and a column for each alternative. The column ordering for alternatives is expected to be the same as their row ordering in the `data` array. A one (True) indicates which alternative each chooser has chosen. numalts : int The number of alternatives. GPU : bool, optional coeffrange : tuple of floats, optional Limits of (min, max) to which coefficients are clipped. weights : ndarray, optional lcgrad : bool, optional beta : 1D array, optional Any initial guess for the coefficients. Returns ------- log_likelihood : dict Dictionary of log-likelihood values describing the quality of the model fit. fit_parameters : pandas.DataFrame Table of fit parameters with columns 'Coefficient', 'Std. Error', 'T-Score'. Each row corresponds to a column in `data` and are given in the same order as in `data`. See Also -------- scipy.optimize.fmin_l_bfgs_b : The optimization routine used. """ logger.debug( 'start: MNL fit with len(data)={} and numalts={}'.format( len(data), numalts)) atype = 'numpy' if not GPU else 'cuda' numvars = data.shape[1] numobs = data.shape[0] // numalts if chosen is None: chosen = np.ones((numobs, numalts)) # used for latent classes data = np.transpose(data) chosen = np.transpose(chosen) data, chosen = PMAT(data, atype), PMAT(chosen, atype) if weights is not None: weights = PMAT(np.transpose(weights), atype) if beta is None: beta = np.zeros(numvars) bounds = [coeffrange] * numvars with log_start_finish('scipy optimization for MNL fit', logger): args = (data, chosen, numalts, weights, lcgrad) bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik, beta, args=args, fprime=None, factr=10, approx_grad=False, bounds=bounds ) if bfgs_result[2]['warnflag'] > 0: logger.warn("mnl did not converge correctly: %s", bfgs_result) beta = bfgs_result[0] stderr = mnl_loglik( beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad) l0beta = np.zeros(numvars) l0 = -1 * mnl_loglik(l0beta, *args)[0] l1 = -1 * mnl_loglik(beta, *args)[0] log_likelihood = { 'null': float(l0[0][0]), 'convergence': float(l1[0][0]), 'ratio': float((1 - (l1 / l0))[0][0]) } fit_parameters = pd.DataFrame({ 'Coefficient': beta, 'Std. Error': stderr, 'T-Score': beta / stderr}) logger.debug('finish: MNL fit') return log_likelihood, fit_parameters
[ "def", "mnl_estimate", "(", "data", ",", "chosen", ",", "numalts", ",", "GPU", "=", "False", ",", "coeffrange", "=", "(", "-", "3", ",", "3", ")", ",", "weights", "=", "None", ",", "lcgrad", "=", "False", ",", "beta", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: MNL fit with len(data)={} and numalts={}'", ".", "format", "(", "len", "(", "data", ")", ",", "numalts", ")", ")", "atype", "=", "'numpy'", "if", "not", "GPU", "else", "'cuda'", "numvars", "=", "data", ".", "shape", "[", "1", "]", "numobs", "=", "data", ".", "shape", "[", "0", "]", "//", "numalts", "if", "chosen", "is", "None", ":", "chosen", "=", "np", ".", "ones", "(", "(", "numobs", ",", "numalts", ")", ")", "# used for latent classes", "data", "=", "np", ".", "transpose", "(", "data", ")", "chosen", "=", "np", ".", "transpose", "(", "chosen", ")", "data", ",", "chosen", "=", "PMAT", "(", "data", ",", "atype", ")", ",", "PMAT", "(", "chosen", ",", "atype", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "PMAT", "(", "np", ".", "transpose", "(", "weights", ")", ",", "atype", ")", "if", "beta", "is", "None", ":", "beta", "=", "np", ".", "zeros", "(", "numvars", ")", "bounds", "=", "[", "coeffrange", "]", "*", "numvars", "with", "log_start_finish", "(", "'scipy optimization for MNL fit'", ",", "logger", ")", ":", "args", "=", "(", "data", ",", "chosen", ",", "numalts", ",", "weights", ",", "lcgrad", ")", "bfgs_result", "=", "scipy", ".", "optimize", ".", "fmin_l_bfgs_b", "(", "mnl_loglik", ",", "beta", ",", "args", "=", "args", ",", "fprime", "=", "None", ",", "factr", "=", "10", ",", "approx_grad", "=", "False", ",", "bounds", "=", "bounds", ")", "if", "bfgs_result", "[", "2", "]", "[", "'warnflag'", "]", ">", "0", ":", "logger", ".", "warn", "(", "\"mnl did not converge correctly: %s\"", ",", "bfgs_result", ")", "beta", "=", "bfgs_result", "[", "0", "]", "stderr", "=", "mnl_loglik", "(", "beta", ",", "data", ",", "chosen", ",", "numalts", ",", "weights", ",", "stderr", "=", "1", ",", "lcgrad", "=", "lcgrad", ")", "l0beta", "=", "np", ".", "zeros", "(", "numvars", ")", "l0", "=", "-", "1", "*", "mnl_loglik", "(", "l0beta", ",", "*", "args", ")", "[", "0", "]", "l1", "=", "-", "1", "*", "mnl_loglik", "(", "beta", ",", "*", "args", ")", "[", "0", "]", "log_likelihood", "=", "{", "'null'", ":", "float", "(", "l0", "[", "0", "]", "[", "0", "]", ")", ",", "'convergence'", ":", "float", "(", "l1", "[", "0", "]", "[", "0", "]", ")", ",", "'ratio'", ":", "float", "(", "(", "1", "-", "(", "l1", "/", "l0", ")", ")", "[", "0", "]", "[", "0", "]", ")", "}", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "{", "'Coefficient'", ":", "beta", ",", "'Std. Error'", ":", "stderr", ",", "'T-Score'", ":", "beta", "/", "stderr", "}", ")", "logger", ".", "debug", "(", "'finish: MNL fit'", ")", "return", "log_likelihood", ",", "fit_parameters" ]
Calculate coefficients of the MNL model. Parameters ---------- data : 2D array The data are expected to be in "long" form where each row is for one alternative. Alternatives are in groups of `numalts` rows per choosers. Alternatives must be in the same order for each chooser. chosen : 2D array This boolean array has a row for each chooser and a column for each alternative. The column ordering for alternatives is expected to be the same as their row ordering in the `data` array. A one (True) indicates which alternative each chooser has chosen. numalts : int The number of alternatives. GPU : bool, optional coeffrange : tuple of floats, optional Limits of (min, max) to which coefficients are clipped. weights : ndarray, optional lcgrad : bool, optional beta : 1D array, optional Any initial guess for the coefficients. Returns ------- log_likelihood : dict Dictionary of log-likelihood values describing the quality of the model fit. fit_parameters : pandas.DataFrame Table of fit parameters with columns 'Coefficient', 'Std. Error', 'T-Score'. Each row corresponds to a column in `data` and are given in the same order as in `data`. See Also -------- scipy.optimize.fmin_l_bfgs_b : The optimization routine used.
[ "Calculate", "coefficients", "of", "the", "MNL", "model", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/urbanchoice/mnl.py#L178-L275
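A minimal synthetic-data sketch of fitting with the mnl_estimate entry above; the sizes and the random `chosen` matrix (exactly one 1 per chooser row) are illustrative only.

import numpy as np
from urbansim.urbanchoice import mnl   # assumes urbansim is installed

numobs, numalts, numvars = 100, 5, 2
data = np.random.randn(numobs * numalts, numvars)      # long form
chosen = np.zeros((numobs, numalts))
chosen[np.arange(numobs), np.random.randint(numalts, size=numobs)] = 1

log_likelihood, fit_parameters = mnl.mnl_estimate(data, chosen, numalts)
print(log_likelihood)       # {'null': ..., 'convergence': ..., 'ratio': ...}
print(fit_parameters)       # Coefficient / Std. Error / T-Score per data column
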
2,942
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.from_yaml
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a DiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutally exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- MNLDiscreteChoiceModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) model = cls( cfg['model_expression'], cfg['sample_size'], probability_mode=cfg.get('probability_mode', 'full_product'), choice_mode=cfg.get('choice_mode', 'individual'), choosers_fit_filters=cfg.get('choosers_fit_filters', None), choosers_predict_filters=cfg.get('choosers_predict_filters', None), alts_fit_filters=cfg.get('alts_fit_filters', None), alts_predict_filters=cfg.get('alts_predict_filters', None), interaction_predict_filters=cfg.get( 'interaction_predict_filters', None), estimation_sample_size=cfg.get('estimation_sample_size', None), prediction_sample_size=cfg.get('prediction_sample_size', None), choice_column=cfg.get('choice_column', None), name=cfg.get('name', None) ) if cfg.get('log_likelihoods', None): model.log_likelihoods = cfg['log_likelihoods'] if cfg.get('fit_parameters', None): model.fit_parameters = pd.DataFrame(cfg['fit_parameters']) logger.debug('loaded LCM model {} from YAML'.format(model.name)) return model
python
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a DiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutally exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- MNLDiscreteChoiceModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) model = cls( cfg['model_expression'], cfg['sample_size'], probability_mode=cfg.get('probability_mode', 'full_product'), choice_mode=cfg.get('choice_mode', 'individual'), choosers_fit_filters=cfg.get('choosers_fit_filters', None), choosers_predict_filters=cfg.get('choosers_predict_filters', None), alts_fit_filters=cfg.get('alts_fit_filters', None), alts_predict_filters=cfg.get('alts_predict_filters', None), interaction_predict_filters=cfg.get( 'interaction_predict_filters', None), estimation_sample_size=cfg.get('estimation_sample_size', None), prediction_sample_size=cfg.get('prediction_sample_size', None), choice_column=cfg.get('choice_column', None), name=cfg.get('name', None) ) if cfg.get('log_likelihoods', None): model.log_likelihoods = cfg['log_likelihoods'] if cfg.get('fit_parameters', None): model.fit_parameters = pd.DataFrame(cfg['fit_parameters']) logger.debug('loaded LCM model {} from YAML'.format(model.name)) return model
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ")", ":", "cfg", "=", "yamlio", ".", "yaml_to_dict", "(", "yaml_str", ",", "str_or_buffer", ")", "model", "=", "cls", "(", "cfg", "[", "'model_expression'", "]", ",", "cfg", "[", "'sample_size'", "]", ",", "probability_mode", "=", "cfg", ".", "get", "(", "'probability_mode'", ",", "'full_product'", ")", ",", "choice_mode", "=", "cfg", ".", "get", "(", "'choice_mode'", ",", "'individual'", ")", ",", "choosers_fit_filters", "=", "cfg", ".", "get", "(", "'choosers_fit_filters'", ",", "None", ")", ",", "choosers_predict_filters", "=", "cfg", ".", "get", "(", "'choosers_predict_filters'", ",", "None", ")", ",", "alts_fit_filters", "=", "cfg", ".", "get", "(", "'alts_fit_filters'", ",", "None", ")", ",", "alts_predict_filters", "=", "cfg", ".", "get", "(", "'alts_predict_filters'", ",", "None", ")", ",", "interaction_predict_filters", "=", "cfg", ".", "get", "(", "'interaction_predict_filters'", ",", "None", ")", ",", "estimation_sample_size", "=", "cfg", ".", "get", "(", "'estimation_sample_size'", ",", "None", ")", ",", "prediction_sample_size", "=", "cfg", ".", "get", "(", "'prediction_sample_size'", ",", "None", ")", ",", "choice_column", "=", "cfg", ".", "get", "(", "'choice_column'", ",", "None", ")", ",", "name", "=", "cfg", ".", "get", "(", "'name'", ",", "None", ")", ")", "if", "cfg", ".", "get", "(", "'log_likelihoods'", ",", "None", ")", ":", "model", ".", "log_likelihoods", "=", "cfg", "[", "'log_likelihoods'", "]", "if", "cfg", ".", "get", "(", "'fit_parameters'", ",", "None", ")", ":", "model", ".", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "cfg", "[", "'fit_parameters'", "]", ")", "logger", ".", "debug", "(", "'loaded LCM model {} from YAML'", ".", "format", "(", "model", ".", "name", ")", ")", "return", "model" ]
Create a DiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- MNLDiscreteChoiceModel
[ "Create", "a", "DiscreteChoiceModel", "instance", "from", "a", "saved", "YAML", "configuration", ".", "Arguments", "are", "mutually", "exclusive", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L278-L320
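A short usage sketch for from_yaml: reload a previously saved model from a YAML config. The file name 'hlcm.yaml' is hypothetical; it stands for any config produced by the model's YAML-saving counterpart in urbansim.

from urbansim.models.dcm import MNLDiscreteChoiceModel

# 'hlcm.yaml' is a hypothetical file written earlier when the model was saved.
model = MNLDiscreteChoiceModel.from_yaml(str_or_buffer='hlcm.yaml')
print(model.name, model.fitted)
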
2,943
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.fit
def fit(self, choosers, alternatives, current_choice): """ Fit and save model parameters based on given data. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice : pandas.Series or any A Series describing the `alternatives` currently chosen by the `choosers`. Should have an index matching `choosers` and values matching the index of `alternatives`. If a non-Series is given it should be a column in `choosers`. Returns ------- log_likelihoods : dict Dict of log-liklihood values describing the quality of the model fit. Will have keys 'null', 'convergence', and 'ratio'. """ logger.debug('start: fit LCM model {}'.format(self.name)) if not isinstance(current_choice, pd.Series): current_choice = choosers[current_choice] choosers, alternatives = self.apply_fit_filters(choosers, alternatives) if self.estimation_sample_size: choosers = choosers.loc[np.random.choice( choosers.index, min(self.estimation_sample_size, len(choosers)), replace=False)] current_choice = current_choice.loc[choosers.index] _, merged, chosen = interaction.mnl_interaction_dataset( choosers, alternatives, self.sample_size, current_choice) model_design = dmatrix( self.str_model_expression, data=merged, return_type='dataframe') if len(merged) != model_design.as_matrix().shape[0]: raise ModelEvaluationError( 'Estimated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate( model_design.as_matrix(), chosen, self.sample_size) self.fit_parameters.index = model_design.columns logger.debug('finish: fit LCM model {}'.format(self.name)) return self.log_likelihoods
python
def fit(self, choosers, alternatives, current_choice): """ Fit and save model parameters based on given data. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice : pandas.Series or any A Series describing the `alternatives` currently chosen by the `choosers`. Should have an index matching `choosers` and values matching the index of `alternatives`. If a non-Series is given it should be a column in `choosers`. Returns ------- log_likelihoods : dict Dict of log-liklihood values describing the quality of the model fit. Will have keys 'null', 'convergence', and 'ratio'. """ logger.debug('start: fit LCM model {}'.format(self.name)) if not isinstance(current_choice, pd.Series): current_choice = choosers[current_choice] choosers, alternatives = self.apply_fit_filters(choosers, alternatives) if self.estimation_sample_size: choosers = choosers.loc[np.random.choice( choosers.index, min(self.estimation_sample_size, len(choosers)), replace=False)] current_choice = current_choice.loc[choosers.index] _, merged, chosen = interaction.mnl_interaction_dataset( choosers, alternatives, self.sample_size, current_choice) model_design = dmatrix( self.str_model_expression, data=merged, return_type='dataframe') if len(merged) != model_design.as_matrix().shape[0]: raise ModelEvaluationError( 'Estimated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate( model_design.as_matrix(), chosen, self.sample_size) self.fit_parameters.index = model_design.columns logger.debug('finish: fit LCM model {}'.format(self.name)) return self.log_likelihoods
[ "def", "fit", "(", "self", ",", "choosers", ",", "alternatives", ",", "current_choice", ")", ":", "logger", ".", "debug", "(", "'start: fit LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "not", "isinstance", "(", "current_choice", ",", "pd", ".", "Series", ")", ":", "current_choice", "=", "choosers", "[", "current_choice", "]", "choosers", ",", "alternatives", "=", "self", ".", "apply_fit_filters", "(", "choosers", ",", "alternatives", ")", "if", "self", ".", "estimation_sample_size", ":", "choosers", "=", "choosers", ".", "loc", "[", "np", ".", "random", ".", "choice", "(", "choosers", ".", "index", ",", "min", "(", "self", ".", "estimation_sample_size", ",", "len", "(", "choosers", ")", ")", ",", "replace", "=", "False", ")", "]", "current_choice", "=", "current_choice", ".", "loc", "[", "choosers", ".", "index", "]", "_", ",", "merged", ",", "chosen", "=", "interaction", ".", "mnl_interaction_dataset", "(", "choosers", ",", "alternatives", ",", "self", ".", "sample_size", ",", "current_choice", ")", "model_design", "=", "dmatrix", "(", "self", ".", "str_model_expression", ",", "data", "=", "merged", ",", "return_type", "=", "'dataframe'", ")", "if", "len", "(", "merged", ")", "!=", "model_design", ".", "as_matrix", "(", ")", ".", "shape", "[", "0", "]", ":", "raise", "ModelEvaluationError", "(", "'Estimated data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "self", ".", "log_likelihoods", ",", "self", ".", "fit_parameters", "=", "mnl", ".", "mnl_estimate", "(", "model_design", ".", "as_matrix", "(", ")", ",", "chosen", ",", "self", ".", "sample_size", ")", "self", ".", "fit_parameters", ".", "index", "=", "model_design", ".", "columns", "logger", ".", "debug", "(", "'finish: fit LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "self", ".", "log_likelihoods" ]
Fit and save model parameters based on given data. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice : pandas.Series or any A Series describing the `alternatives` currently chosen by the `choosers`. Should have an index matching `choosers` and values matching the index of `alternatives`. If a non-Series is given it should be a column in `choosers`. Returns ------- log_likelihoods : dict Dict of log-likelihood values describing the quality of the model fit. Will have keys 'null', 'convergence', and 'ratio'.
[ "Fit", "and", "save", "model", "parameters", "based", "on", "given", "data", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L371-L427
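A hedged end-to-end fitting sketch for the fit method above, with tiny synthetic tables; the column names (income, unit_price, residential_units, building_id) and table sizes are invented for illustration, and a pandas version compatible with urbansim is assumed.

import numpy as np
import pandas as pd
from urbansim.models.dcm import MNLDiscreteChoiceModel

# Hypothetical choosers (households) and alternatives (buildings).
households = pd.DataFrame({
    'income': np.random.lognormal(10, 1, 50),
    'building_id': np.random.choice(20, 50)})    # current building of each household
buildings = pd.DataFrame({
    'unit_price': np.random.lognormal(12, 0.5, 20),
    'residential_units': np.random.randint(1, 10, 20)})

model = MNLDiscreteChoiceModel(
    model_expression='unit_price + residential_units',   # right-hand side only
    sample_size=10)
model.fit(households, buildings, current_choice='building_id')
print(model.log_likelihoods)
print(model.fit_parameters)
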
2,944
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.probabilities
def probabilities(self, choosers, alternatives, filter_tables=True): """ Returns the probabilities for a set of choosers to choose from among a set of alternatives. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. filter_tables : bool, optional If True, filter `choosers` and `alternatives` with prediction filters before calculating probabilities. Returns ------- probabilities : pandas.Series Probability of selection associated with each chooser and alternative. Index will be a MultiIndex with alternative IDs in the inner index and chooser IDs in the out index. """ logger.debug('start: calculate probabilities for LCM model {}'.format( self.name)) self.assert_fitted() if filter_tables: choosers, alternatives = self.apply_predict_filters( choosers, alternatives) if self.prediction_sample_size is not None: sample_size = self.prediction_sample_size else: sample_size = len(alternatives) if self.probability_mode == 'single_chooser': _, merged, _ = interaction.mnl_interaction_dataset( choosers.head(1), alternatives, sample_size) elif self.probability_mode == 'full_product': _, merged, _ = interaction.mnl_interaction_dataset( choosers, alternatives, sample_size) else: raise ValueError( 'Unrecognized probability_mode option: {}'.format( self.probability_mode)) merged = util.apply_filter_query( merged, self.interaction_predict_filters) model_design = dmatrix( self.str_model_expression, data=merged, return_type='dataframe') if len(merged) != model_design.as_matrix().shape[0]: raise ModelEvaluationError( 'Simulated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') # get the order of the coefficients in the same order as the # columns in the design matrix coeffs = [self.fit_parameters['Coefficient'][x] for x in model_design.columns] # probabilities are returned from mnl_simulate as a 2d array # with choosers along rows and alternatives along columns if self.probability_mode == 'single_chooser': numalts = len(merged) else: numalts = sample_size probabilities = mnl.mnl_simulate( model_design.as_matrix(), coeffs, numalts=numalts, returnprobs=True) # want to turn probabilities into a Series with a MultiIndex # of chooser IDs and alternative IDs. # indexing by chooser ID will get you the probabilities # across alternatives for that chooser mi = pd.MultiIndex.from_arrays( [merged['join_index'].values, merged.index.values], names=('chooser_id', 'alternative_id')) probabilities = pd.Series(probabilities.flatten(), index=mi) logger.debug('finish: calculate probabilities for LCM model {}'.format( self.name)) return probabilities
python
def probabilities(self, choosers, alternatives, filter_tables=True): """ Returns the probabilities for a set of choosers to choose from among a set of alternatives. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. filter_tables : bool, optional If True, filter `choosers` and `alternatives` with prediction filters before calculating probabilities. Returns ------- probabilities : pandas.Series Probability of selection associated with each chooser and alternative. Index will be a MultiIndex with alternative IDs in the inner index and chooser IDs in the out index. """ logger.debug('start: calculate probabilities for LCM model {}'.format( self.name)) self.assert_fitted() if filter_tables: choosers, alternatives = self.apply_predict_filters( choosers, alternatives) if self.prediction_sample_size is not None: sample_size = self.prediction_sample_size else: sample_size = len(alternatives) if self.probability_mode == 'single_chooser': _, merged, _ = interaction.mnl_interaction_dataset( choosers.head(1), alternatives, sample_size) elif self.probability_mode == 'full_product': _, merged, _ = interaction.mnl_interaction_dataset( choosers, alternatives, sample_size) else: raise ValueError( 'Unrecognized probability_mode option: {}'.format( self.probability_mode)) merged = util.apply_filter_query( merged, self.interaction_predict_filters) model_design = dmatrix( self.str_model_expression, data=merged, return_type='dataframe') if len(merged) != model_design.as_matrix().shape[0]: raise ModelEvaluationError( 'Simulated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') # get the order of the coefficients in the same order as the # columns in the design matrix coeffs = [self.fit_parameters['Coefficient'][x] for x in model_design.columns] # probabilities are returned from mnl_simulate as a 2d array # with choosers along rows and alternatives along columns if self.probability_mode == 'single_chooser': numalts = len(merged) else: numalts = sample_size probabilities = mnl.mnl_simulate( model_design.as_matrix(), coeffs, numalts=numalts, returnprobs=True) # want to turn probabilities into a Series with a MultiIndex # of chooser IDs and alternative IDs. # indexing by chooser ID will get you the probabilities # across alternatives for that chooser mi = pd.MultiIndex.from_arrays( [merged['join_index'].values, merged.index.values], names=('chooser_id', 'alternative_id')) probabilities = pd.Series(probabilities.flatten(), index=mi) logger.debug('finish: calculate probabilities for LCM model {}'.format( self.name)) return probabilities
[ "def", "probabilities", "(", "self", ",", "choosers", ",", "alternatives", ",", "filter_tables", "=", "True", ")", ":", "logger", ".", "debug", "(", "'start: calculate probabilities for LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "self", ".", "assert_fitted", "(", ")", "if", "filter_tables", ":", "choosers", ",", "alternatives", "=", "self", ".", "apply_predict_filters", "(", "choosers", ",", "alternatives", ")", "if", "self", ".", "prediction_sample_size", "is", "not", "None", ":", "sample_size", "=", "self", ".", "prediction_sample_size", "else", ":", "sample_size", "=", "len", "(", "alternatives", ")", "if", "self", ".", "probability_mode", "==", "'single_chooser'", ":", "_", ",", "merged", ",", "_", "=", "interaction", ".", "mnl_interaction_dataset", "(", "choosers", ".", "head", "(", "1", ")", ",", "alternatives", ",", "sample_size", ")", "elif", "self", ".", "probability_mode", "==", "'full_product'", ":", "_", ",", "merged", ",", "_", "=", "interaction", ".", "mnl_interaction_dataset", "(", "choosers", ",", "alternatives", ",", "sample_size", ")", "else", ":", "raise", "ValueError", "(", "'Unrecognized probability_mode option: {}'", ".", "format", "(", "self", ".", "probability_mode", ")", ")", "merged", "=", "util", ".", "apply_filter_query", "(", "merged", ",", "self", ".", "interaction_predict_filters", ")", "model_design", "=", "dmatrix", "(", "self", ".", "str_model_expression", ",", "data", "=", "merged", ",", "return_type", "=", "'dataframe'", ")", "if", "len", "(", "merged", ")", "!=", "model_design", ".", "as_matrix", "(", ")", ".", "shape", "[", "0", "]", ":", "raise", "ModelEvaluationError", "(", "'Simulated data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "# get the order of the coefficients in the same order as the", "# columns in the design matrix", "coeffs", "=", "[", "self", ".", "fit_parameters", "[", "'Coefficient'", "]", "[", "x", "]", "for", "x", "in", "model_design", ".", "columns", "]", "# probabilities are returned from mnl_simulate as a 2d array", "# with choosers along rows and alternatives along columns", "if", "self", ".", "probability_mode", "==", "'single_chooser'", ":", "numalts", "=", "len", "(", "merged", ")", "else", ":", "numalts", "=", "sample_size", "probabilities", "=", "mnl", ".", "mnl_simulate", "(", "model_design", ".", "as_matrix", "(", ")", ",", "coeffs", ",", "numalts", "=", "numalts", ",", "returnprobs", "=", "True", ")", "# want to turn probabilities into a Series with a MultiIndex", "# of chooser IDs and alternative IDs.", "# indexing by chooser ID will get you the probabilities", "# across alternatives for that chooser", "mi", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "merged", "[", "'join_index'", "]", ".", "values", ",", "merged", ".", "index", ".", "values", "]", ",", "names", "=", "(", "'chooser_id'", ",", "'alternative_id'", ")", ")", "probabilities", "=", "pd", ".", "Series", "(", "probabilities", ".", "flatten", "(", ")", ",", "index", "=", "mi", ")", "logger", ".", "debug", "(", "'finish: calculate probabilities for LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "probabilities" ]
Returns the probabilities for a set of choosers to choose from among a set of alternatives. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. filter_tables : bool, optional If True, filter `choosers` and `alternatives` with prediction filters before calculating probabilities. Returns ------- probabilities : pandas.Series Probability of selection associated with each chooser and alternative. Index will be a MultiIndex with alternative IDs in the inner index and chooser IDs in the outer index.
[ "Returns", "the", "probabilities", "for", "a", "set", "of", "choosers", "to", "choose", "from", "among", "a", "set", "of", "alternatives", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L474-L560
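Continuing the hypothetical households/buildings sketch used for fit above, probabilities returns a Series keyed by a (chooser_id, alternative_id) MultiIndex:

# Continues the hypothetical households/buildings example from the fit sketch.
probs = model.probabilities(households, buildings)
print(probs.head())                    # MultiIndex: (chooser_id, alternative_id)
print(probs.loc[households.index[0]])  # probabilities across alternatives for one chooser
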
2,945
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.summed_probabilities
def summed_probabilities(self, choosers, alternatives): """ Calculate total probability associated with each alternative. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Total probability associated with each alternative. """ def normalize(s): return s / s.sum() choosers, alternatives = self.apply_predict_filters( choosers, alternatives) probs = self.probabilities(choosers, alternatives, filter_tables=False) # groupby the the alternatives ID and sum if self.probability_mode == 'single_chooser': return ( normalize(probs) * len(choosers) ).reset_index(level=0, drop=True) elif self.probability_mode == 'full_product': return probs.groupby(level=0).apply(normalize)\ .groupby(level=1).sum() else: raise ValueError( 'Unrecognized probability_mode option: {}'.format( self.probability_mode))
python
def summed_probabilities(self, choosers, alternatives): """ Calculate total probability associated with each alternative. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Total probability associated with each alternative. """ def normalize(s): return s / s.sum() choosers, alternatives = self.apply_predict_filters( choosers, alternatives) probs = self.probabilities(choosers, alternatives, filter_tables=False) # groupby the the alternatives ID and sum if self.probability_mode == 'single_chooser': return ( normalize(probs) * len(choosers) ).reset_index(level=0, drop=True) elif self.probability_mode == 'full_product': return probs.groupby(level=0).apply(normalize)\ .groupby(level=1).sum() else: raise ValueError( 'Unrecognized probability_mode option: {}'.format( self.probability_mode))
[ "def", "summed_probabilities", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "def", "normalize", "(", "s", ")", ":", "return", "s", "/", "s", ".", "sum", "(", ")", "choosers", ",", "alternatives", "=", "self", ".", "apply_predict_filters", "(", "choosers", ",", "alternatives", ")", "probs", "=", "self", ".", "probabilities", "(", "choosers", ",", "alternatives", ",", "filter_tables", "=", "False", ")", "# groupby the the alternatives ID and sum", "if", "self", ".", "probability_mode", "==", "'single_chooser'", ":", "return", "(", "normalize", "(", "probs", ")", "*", "len", "(", "choosers", ")", ")", ".", "reset_index", "(", "level", "=", "0", ",", "drop", "=", "True", ")", "elif", "self", ".", "probability_mode", "==", "'full_product'", ":", "return", "probs", ".", "groupby", "(", "level", "=", "0", ")", ".", "apply", "(", "normalize", ")", ".", "groupby", "(", "level", "=", "1", ")", ".", "sum", "(", ")", "else", ":", "raise", "ValueError", "(", "'Unrecognized probability_mode option: {}'", ".", "format", "(", "self", ".", "probability_mode", ")", ")" ]
Calculate total probability associated with each alternative. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Total probability associated with each alternative.
[ "Calculate", "total", "probability", "associated", "with", "each", "alternative", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L562-L597
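summed_probabilities aggregates the per-chooser probabilities into expected demand per alternative; again continuing the same hypothetical example:

# Continues the hypothetical households/buildings example.
demand = model.summed_probabilities(households, buildings)
print(demand.head())    # expected number of choosers per alternative_id
print(demand.sum())     # should be roughly len(households)
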
2,946
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.predict
def predict(self, choosers, alternatives, debug=False): """ Choose from among alternatives for a group of agents. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. """ self.assert_fitted() logger.debug('start: predict LCM model {}'.format(self.name)) choosers, alternatives = self.apply_predict_filters( choosers, alternatives) if len(choosers) == 0: return pd.Series() if len(alternatives) == 0: return pd.Series(index=choosers.index) probabilities = self.probabilities( choosers, alternatives, filter_tables=False) if debug: self.sim_pdf = probabilities if self.choice_mode == 'aggregate': choices = unit_choice( choosers.index.values, probabilities.index.get_level_values('alternative_id').values, probabilities.values) elif self.choice_mode == 'individual': def mkchoice(probs): probs.reset_index(0, drop=True, inplace=True) return np.random.choice( probs.index.values, p=probs.values / probs.sum()) choices = probabilities.groupby(level='chooser_id', sort=False)\ .apply(mkchoice) else: raise ValueError( 'Unrecognized choice_mode option: {}'.format(self.choice_mode)) logger.debug('finish: predict LCM model {}'.format(self.name)) return choices
python
def predict(self, choosers, alternatives, debug=False): """ Choose from among alternatives for a group of agents. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. """ self.assert_fitted() logger.debug('start: predict LCM model {}'.format(self.name)) choosers, alternatives = self.apply_predict_filters( choosers, alternatives) if len(choosers) == 0: return pd.Series() if len(alternatives) == 0: return pd.Series(index=choosers.index) probabilities = self.probabilities( choosers, alternatives, filter_tables=False) if debug: self.sim_pdf = probabilities if self.choice_mode == 'aggregate': choices = unit_choice( choosers.index.values, probabilities.index.get_level_values('alternative_id').values, probabilities.values) elif self.choice_mode == 'individual': def mkchoice(probs): probs.reset_index(0, drop=True, inplace=True) return np.random.choice( probs.index.values, p=probs.values / probs.sum()) choices = probabilities.groupby(level='chooser_id', sort=False)\ .apply(mkchoice) else: raise ValueError( 'Unrecognized choice_mode option: {}'.format(self.choice_mode)) logger.debug('finish: predict LCM model {}'.format(self.name)) return choices
[ "def", "predict", "(", "self", ",", "choosers", ",", "alternatives", ",", "debug", "=", "False", ")", ":", "self", ".", "assert_fitted", "(", ")", "logger", ".", "debug", "(", "'start: predict LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "choosers", ",", "alternatives", "=", "self", ".", "apply_predict_filters", "(", "choosers", ",", "alternatives", ")", "if", "len", "(", "choosers", ")", "==", "0", ":", "return", "pd", ".", "Series", "(", ")", "if", "len", "(", "alternatives", ")", "==", "0", ":", "return", "pd", ".", "Series", "(", "index", "=", "choosers", ".", "index", ")", "probabilities", "=", "self", ".", "probabilities", "(", "choosers", ",", "alternatives", ",", "filter_tables", "=", "False", ")", "if", "debug", ":", "self", ".", "sim_pdf", "=", "probabilities", "if", "self", ".", "choice_mode", "==", "'aggregate'", ":", "choices", "=", "unit_choice", "(", "choosers", ".", "index", ".", "values", ",", "probabilities", ".", "index", ".", "get_level_values", "(", "'alternative_id'", ")", ".", "values", ",", "probabilities", ".", "values", ")", "elif", "self", ".", "choice_mode", "==", "'individual'", ":", "def", "mkchoice", "(", "probs", ")", ":", "probs", ".", "reset_index", "(", "0", ",", "drop", "=", "True", ",", "inplace", "=", "True", ")", "return", "np", ".", "random", ".", "choice", "(", "probs", ".", "index", ".", "values", ",", "p", "=", "probs", ".", "values", "/", "probs", ".", "sum", "(", ")", ")", "choices", "=", "probabilities", ".", "groupby", "(", "level", "=", "'chooser_id'", ",", "sort", "=", "False", ")", ".", "apply", "(", "mkchoice", ")", "else", ":", "raise", "ValueError", "(", "'Unrecognized choice_mode option: {}'", ".", "format", "(", "self", ".", "choice_mode", ")", ")", "logger", ".", "debug", "(", "'finish: predict LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "choices" ]
Choose from among alternatives for a group of agents. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers.
[ "Choose", "from", "among", "alternatives", "for", "a", "group", "of", "agents", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L599-L657
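And prediction, continuing the same hypothetical example; the result maps each chooser to the alternative it picked (NaN is possible in aggregate mode when alternatives run out).

# Continues the hypothetical households/buildings example.
choices = model.predict(households, buildings)
print(choices.head())    # chooser_id -> chosen alternative_id
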
2,947
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.to_dict
def to_dict(self): """ Return a dict respresentation of an MNLDiscreteChoiceModel instance. """ return { 'model_type': 'discretechoice', 'model_expression': self.model_expression, 'sample_size': self.sample_size, 'name': self.name, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'fitted': self.fitted, 'log_likelihoods': self.log_likelihoods, 'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters) if self.fitted else None) }
python
def to_dict(self): """ Return a dict respresentation of an MNLDiscreteChoiceModel instance. """ return { 'model_type': 'discretechoice', 'model_expression': self.model_expression, 'sample_size': self.sample_size, 'name': self.name, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'fitted': self.fitted, 'log_likelihoods': self.log_likelihoods, 'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters) if self.fitted else None) }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'model_type'", ":", "'discretechoice'", ",", "'model_expression'", ":", "self", ".", "model_expression", ",", "'sample_size'", ":", "self", ".", "sample_size", ",", "'name'", ":", "self", ".", "name", ",", "'probability_mode'", ":", "self", ".", "probability_mode", ",", "'choice_mode'", ":", "self", ".", "choice_mode", ",", "'choosers_fit_filters'", ":", "self", ".", "choosers_fit_filters", ",", "'choosers_predict_filters'", ":", "self", ".", "choosers_predict_filters", ",", "'alts_fit_filters'", ":", "self", ".", "alts_fit_filters", ",", "'alts_predict_filters'", ":", "self", ".", "alts_predict_filters", ",", "'interaction_predict_filters'", ":", "self", ".", "interaction_predict_filters", ",", "'estimation_sample_size'", ":", "self", ".", "estimation_sample_size", ",", "'prediction_sample_size'", ":", "self", ".", "prediction_sample_size", ",", "'choice_column'", ":", "self", ".", "choice_column", ",", "'fitted'", ":", "self", ".", "fitted", ",", "'log_likelihoods'", ":", "self", ".", "log_likelihoods", ",", "'fit_parameters'", ":", "(", "yamlio", ".", "frame_to_yaml_safe", "(", "self", ".", "fit_parameters", ")", "if", "self", ".", "fitted", "else", "None", ")", "}" ]
Return a dict representation of an MNLDiscreteChoiceModel instance.
[ "Return", "a", "dict", "representation", "of", "an", "MNLDiscreteChoiceModel", "instance", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L659-L684
2,948
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.choosers_columns_used
def choosers_columns_used(self): """ Columns from the choosers table that are used for filtering. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.choosers_predict_filters), util.columns_in_filters(self.choosers_fit_filters))))
python
def choosers_columns_used(self): """ Columns from the choosers table that are used for filtering. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.choosers_predict_filters), util.columns_in_filters(self.choosers_fit_filters))))
[ "def", "choosers_columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "util", ".", "columns_in_filters", "(", "self", ".", "choosers_predict_filters", ")", ",", "util", ".", "columns_in_filters", "(", "self", ".", "choosers_fit_filters", ")", ")", ")", ")" ]
Columns from the choosers table that are used for filtering.
[ "Columns", "from", "the", "choosers", "table", "that", "are", "used", "for", "filtering", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L712-L719
2,949
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.interaction_columns_used
def interaction_columns_used(self): """ Columns from the interaction dataset used for filtering and in the model. These may come originally from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.interaction_predict_filters), util.columns_in_formula(self.model_expression))))
python
def interaction_columns_used(self): """ Columns from the interaction dataset used for filtering and in the model. These may come originally from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.interaction_predict_filters), util.columns_in_formula(self.model_expression))))
[ "def", "interaction_columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "util", ".", "columns_in_filters", "(", "self", ".", "interaction_predict_filters", ")", ",", "util", ".", "columns_in_formula", "(", "self", ".", "model_expression", ")", ")", ")", ")" ]
Columns from the interaction dataset used for filtering and in the model. These may come originally from either the choosers or alternatives tables.
[ "Columns", "from", "the", "interaction", "dataset", "used", "for", "filtering", "and", "in", "the", "model", ".", "These", "may", "come", "originally", "from", "either", "the", "choosers", "or", "alternatives", "tables", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L730-L739
2,950
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.predict_from_cfg
def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None, alternative_ratio=2.0, debug=False): """ Simulate choices for the specified choosers Parameters ---------- choosers : DataFrame A dataframe of agents doing the choosing. alternatives : DataFrame A dataframe of locations which the choosers are locating in and which have a supply. cfgname : string The name of the yaml config file from which to read the discrete choice model. cfg: string an ordered yaml string of the model discrete choice model configuration. Used to read config from memory in lieu of loading cfgname from disk. alternative_ratio : float, optional Above the ratio of alternatives to choosers (default of 2.0), the alternatives will be sampled to meet this ratio (for performance reasons). debug : boolean, optional (default False) Whether to generate debug information on the model. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. lcm : MNLDiscreteChoiceModel which was used to predict """ logger.debug('start: predict from configuration {}'.format(cfgname)) if cfgname: lcm = cls.from_yaml(str_or_buffer=cfgname) elif cfg: lcm = cls.from_yaml(yaml_str=cfg) else: msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments' logger.error(msg) raise ValueError(msg) if len(alternatives) > len(choosers) * alternative_ratio: logger.info( ("Alternative ratio exceeded: %d alternatives " "and only %d choosers") % (len(alternatives), len(choosers))) idxes = np.random.choice( alternatives.index, size=int(len(choosers) * alternative_ratio), replace=False) alternatives = alternatives.loc[idxes] logger.info( " after sampling %d alternatives are available\n" % len(alternatives)) new_units = lcm.predict(choosers, alternatives, debug=debug) print("Assigned %d choosers to new units" % len(new_units.dropna())) logger.debug('finish: predict from configuration {}'.format(cfgname)) return new_units, lcm
python
def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None, alternative_ratio=2.0, debug=False): """ Simulate choices for the specified choosers Parameters ---------- choosers : DataFrame A dataframe of agents doing the choosing. alternatives : DataFrame A dataframe of locations which the choosers are locating in and which have a supply. cfgname : string The name of the yaml config file from which to read the discrete choice model. cfg: string an ordered yaml string of the model discrete choice model configuration. Used to read config from memory in lieu of loading cfgname from disk. alternative_ratio : float, optional Above the ratio of alternatives to choosers (default of 2.0), the alternatives will be sampled to meet this ratio (for performance reasons). debug : boolean, optional (default False) Whether to generate debug information on the model. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. lcm : MNLDiscreteChoiceModel which was used to predict """ logger.debug('start: predict from configuration {}'.format(cfgname)) if cfgname: lcm = cls.from_yaml(str_or_buffer=cfgname) elif cfg: lcm = cls.from_yaml(yaml_str=cfg) else: msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments' logger.error(msg) raise ValueError(msg) if len(alternatives) > len(choosers) * alternative_ratio: logger.info( ("Alternative ratio exceeded: %d alternatives " "and only %d choosers") % (len(alternatives), len(choosers))) idxes = np.random.choice( alternatives.index, size=int(len(choosers) * alternative_ratio), replace=False) alternatives = alternatives.loc[idxes] logger.info( " after sampling %d alternatives are available\n" % len(alternatives)) new_units = lcm.predict(choosers, alternatives, debug=debug) print("Assigned %d choosers to new units" % len(new_units.dropna())) logger.debug('finish: predict from configuration {}'.format(cfgname)) return new_units, lcm
[ "def", "predict_from_cfg", "(", "cls", ",", "choosers", ",", "alternatives", ",", "cfgname", "=", "None", ",", "cfg", "=", "None", ",", "alternative_ratio", "=", "2.0", ",", "debug", "=", "False", ")", ":", "logger", ".", "debug", "(", "'start: predict from configuration {}'", ".", "format", "(", "cfgname", ")", ")", "if", "cfgname", ":", "lcm", "=", "cls", ".", "from_yaml", "(", "str_or_buffer", "=", "cfgname", ")", "elif", "cfg", ":", "lcm", "=", "cls", ".", "from_yaml", "(", "yaml_str", "=", "cfg", ")", "else", ":", "msg", "=", "'predict_from_cfg requires a configuration via the cfgname or cfg arguments'", "logger", ".", "error", "(", "msg", ")", "raise", "ValueError", "(", "msg", ")", "if", "len", "(", "alternatives", ")", ">", "len", "(", "choosers", ")", "*", "alternative_ratio", ":", "logger", ".", "info", "(", "(", "\"Alternative ratio exceeded: %d alternatives \"", "\"and only %d choosers\"", ")", "%", "(", "len", "(", "alternatives", ")", ",", "len", "(", "choosers", ")", ")", ")", "idxes", "=", "np", ".", "random", ".", "choice", "(", "alternatives", ".", "index", ",", "size", "=", "int", "(", "len", "(", "choosers", ")", "*", "alternative_ratio", ")", ",", "replace", "=", "False", ")", "alternatives", "=", "alternatives", ".", "loc", "[", "idxes", "]", "logger", ".", "info", "(", "\" after sampling %d alternatives are available\\n\"", "%", "len", "(", "alternatives", ")", ")", "new_units", "=", "lcm", ".", "predict", "(", "choosers", ",", "alternatives", ",", "debug", "=", "debug", ")", "print", "(", "\"Assigned %d choosers to new units\"", "%", "len", "(", "new_units", ".", "dropna", "(", ")", ")", ")", "logger", ".", "debug", "(", "'finish: predict from configuration {}'", ".", "format", "(", "cfgname", ")", ")", "return", "new_units", ",", "lcm" ]
Simulate choices for the specified choosers Parameters ---------- choosers : DataFrame A dataframe of agents doing the choosing. alternatives : DataFrame A dataframe of locations which the choosers are locating in and which have a supply. cfgname : string The name of the yaml config file from which to read the discrete choice model. cfg : string An ordered yaml string of the discrete choice model configuration. Used to read config from memory in lieu of loading cfgname from disk. alternative_ratio : float, optional Above the ratio of alternatives to choosers (default of 2.0), the alternatives will be sampled to meet this ratio (for performance reasons). debug : boolean, optional (default False) Whether to generate debug information on the model. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. lcm : MNLDiscreteChoiceModel which was used to predict
[ "Simulate", "choices", "for", "the", "specified", "choosers" ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L787-L847
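A minimal usage sketch for predict_from_cfg as shown above. The file name 'hlcm.yaml', the column names, and the toy values are hypothetical, and the YAML is assumed to hold a previously fitted model (for example one written out with to_yaml), since prediction needs fit parameters.

# Hedged sketch, not from the source: toy frames plus a hypothetical config file.
import pandas as pd
from urbansim.models.dcm import MNLDiscreteChoiceModel

choosers = pd.DataFrame(
    {'income': [40000, 90000, 65000]},
    index=pd.Index([1, 2, 3], name='household_id'))
alternatives = pd.DataFrame(
    {'price': [200000, 350000, 275000, 150000, 120000, 410000]},
    index=pd.Index(range(6), name='building_id'))

# 'hlcm.yaml' is assumed to contain a fitted discrete choice model configuration.
choices, lcm = MNLDiscreteChoiceModel.predict_from_cfg(
    choosers, alternatives, cfgname='hlcm.yaml', alternative_ratio=2.0)
print(choices)  # chooser id -> chosen alternative id (NaN when alternatives run out)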
2,951
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.add_model_from_params
def add_model_from_params( self, name, model_expression, sample_size, probability_mode='full_product', choice_mode='individual', choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=None, estimation_sample_size=None, prediction_sample_size=None, choice_column=None): """ Add a model by passing parameters through to MNLDiscreteChoiceModel. Parameters ---------- name Must match a segment in the choosers table. model_expression : str, iterable, or dict A patsy model expression. Should contain only a right-hand side. sample_size : int Number of choices to sample for estimating the model. probability_mode : str, optional Specify the method to use for calculating probabilities during prediction. Available string options are 'single_chooser' and 'full_product'. In "single chooser" mode one agent is chosen for calculating probabilities across all alternatives. In "full product" mode probabilities are calculated for every chooser across all alternatives. choice_mode : str or callable, optional Specify the method to use for making choices among alternatives. Available string options are 'individual' and 'aggregate'. In "individual" mode choices will be made separately for each chooser. In "aggregate" mode choices are made for all choosers at once. Aggregate mode implies that an alternative chosen by one agent is unavailable to other agents and that the same probabilities can be used for all choosers. choosers_fit_filters : list of str, optional Filters applied to choosers table before fitting the model. choosers_predict_filters : list of str, optional Filters applied to the choosers table before calculating new data points. alts_fit_filters : list of str, optional Filters applied to the alternatives table before fitting the model. alts_predict_filters : list of str, optional Filters applied to the alternatives table before calculating new data points. interaction_predict_filters : list of str, optional Filters applied to the merged choosers/alternatives table before predicting agent choices. estimation_sample_size : int, optional Whether to sample choosers during estimation (needs to be applied after choosers_fit_filters) prediction_sample_size : int, optional Whether (and how much) to sample alternatives during prediction. Note that this can lead to multiple choosers picking the same alternative. choice_column : optional Name of the column in the `alternatives` table that choosers should choose. e.g. the 'building_id' column. If not provided the alternatives index is used. """ logger.debug('adding model {} to LCM group {}'.format(name, self.name)) self.models[name] = MNLDiscreteChoiceModel( model_expression, sample_size, probability_mode, choice_mode, choosers_fit_filters, choosers_predict_filters, alts_fit_filters, alts_predict_filters, interaction_predict_filters, estimation_sample_size, prediction_sample_size, choice_column, name)
python
def add_model_from_params( self, name, model_expression, sample_size, probability_mode='full_product', choice_mode='individual', choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=None, estimation_sample_size=None, prediction_sample_size=None, choice_column=None): """ Add a model by passing parameters through to MNLDiscreteChoiceModel. Parameters ---------- name Must match a segment in the choosers table. model_expression : str, iterable, or dict A patsy model expression. Should contain only a right-hand side. sample_size : int Number of choices to sample for estimating the model. probability_mode : str, optional Specify the method to use for calculating probabilities during prediction. Available string options are 'single_chooser' and 'full_product'. In "single chooser" mode one agent is chosen for calculating probabilities across all alternatives. In "full product" mode probabilities are calculated for every chooser across all alternatives. choice_mode : str or callable, optional Specify the method to use for making choices among alternatives. Available string options are 'individual' and 'aggregate'. In "individual" mode choices will be made separately for each chooser. In "aggregate" mode choices are made for all choosers at once. Aggregate mode implies that an alternative chosen by one agent is unavailable to other agents and that the same probabilities can be used for all choosers. choosers_fit_filters : list of str, optional Filters applied to choosers table before fitting the model. choosers_predict_filters : list of str, optional Filters applied to the choosers table before calculating new data points. alts_fit_filters : list of str, optional Filters applied to the alternatives table before fitting the model. alts_predict_filters : list of str, optional Filters applied to the alternatives table before calculating new data points. interaction_predict_filters : list of str, optional Filters applied to the merged choosers/alternatives table before predicting agent choices. estimation_sample_size : int, optional Whether to sample choosers during estimation (needs to be applied after choosers_fit_filters) prediction_sample_size : int, optional Whether (and how much) to sample alternatives during prediction. Note that this can lead to multiple choosers picking the same alternative. choice_column : optional Name of the column in the `alternatives` table that choosers should choose. e.g. the 'building_id' column. If not provided the alternatives index is used. """ logger.debug('adding model {} to LCM group {}'.format(name, self.name)) self.models[name] = MNLDiscreteChoiceModel( model_expression, sample_size, probability_mode, choice_mode, choosers_fit_filters, choosers_predict_filters, alts_fit_filters, alts_predict_filters, interaction_predict_filters, estimation_sample_size, prediction_sample_size, choice_column, name)
[ "def", "add_model_from_params", "(", "self", ",", "name", ",", "model_expression", ",", "sample_size", ",", "probability_mode", "=", "'full_product'", ",", "choice_mode", "=", "'individual'", ",", "choosers_fit_filters", "=", "None", ",", "choosers_predict_filters", "=", "None", ",", "alts_fit_filters", "=", "None", ",", "alts_predict_filters", "=", "None", ",", "interaction_predict_filters", "=", "None", ",", "estimation_sample_size", "=", "None", ",", "prediction_sample_size", "=", "None", ",", "choice_column", "=", "None", ")", ":", "logger", ".", "debug", "(", "'adding model {} to LCM group {}'", ".", "format", "(", "name", ",", "self", ".", "name", ")", ")", "self", ".", "models", "[", "name", "]", "=", "MNLDiscreteChoiceModel", "(", "model_expression", ",", "sample_size", ",", "probability_mode", ",", "choice_mode", ",", "choosers_fit_filters", ",", "choosers_predict_filters", ",", "alts_fit_filters", ",", "alts_predict_filters", ",", "interaction_predict_filters", ",", "estimation_sample_size", ",", "prediction_sample_size", ",", "choice_column", ",", "name", ")" ]
Add a model by passing parameters through to MNLDiscreteChoiceModel. Parameters ---------- name Must match a segment in the choosers table. model_expression : str, iterable, or dict A patsy model expression. Should contain only a right-hand side. sample_size : int Number of choices to sample for estimating the model. probability_mode : str, optional Specify the method to use for calculating probabilities during prediction. Available string options are 'single_chooser' and 'full_product'. In "single chooser" mode one agent is chosen for calculating probabilities across all alternatives. In "full product" mode probabilities are calculated for every chooser across all alternatives. choice_mode : str or callable, optional Specify the method to use for making choices among alternatives. Available string options are 'individual' and 'aggregate'. In "individual" mode choices will be made separately for each chooser. In "aggregate" mode choices are made for all choosers at once. Aggregate mode implies that an alternative chosen by one agent is unavailable to other agents and that the same probabilities can be used for all choosers. choosers_fit_filters : list of str, optional Filters applied to choosers table before fitting the model. choosers_predict_filters : list of str, optional Filters applied to the choosers table before calculating new data points. alts_fit_filters : list of str, optional Filters applied to the alternatives table before fitting the model. alts_predict_filters : list of str, optional Filters applied to the alternatives table before calculating new data points. interaction_predict_filters : list of str, optional Filters applied to the merged choosers/alternatives table before predicting agent choices. estimation_sample_size : int, optional Whether to sample choosers during estimation (needs to be applied after choosers_fit_filters) prediction_sample_size : int, optional Whether (and how much) to sample alternatives during prediction. Note that this can lead to multiple choosers picking the same alternative. choice_column : optional Name of the column in the `alternatives` table that choosers should choose. e.g. the 'building_id' column. If not provided the alternatives index is used.
[ "Add", "a", "model", "by", "passing", "parameters", "through", "to", "MNLDiscreteChoiceModel", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L893-L960
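A short sketch of how this pass-through might be used. It assumes MNLDiscreteChoiceModelGroup takes the segmentation column name as its first constructor argument; the column name, segment labels, and model expression are invented for illustration.

# Hedged sketch: one submodel per income quartile; names and expression are illustrative.
from urbansim.models.dcm import MNLDiscreteChoiceModelGroup

group = MNLDiscreteChoiceModelGroup('income_quartile', name='hlcm_group')

for segment in [1, 2, 3, 4]:
    # Each call builds an MNLDiscreteChoiceModel with these settings and
    # registers it under the segment name.
    group.add_model_from_params(
        name=segment,
        model_expression='price + sqft_per_unit',
        sample_size=10,
        probability_mode='full_product',
        choice_mode='individual')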
2,952
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.apply_fit_filters
def apply_fit_filters(self, choosers, alternatives): """ Filter `choosers` and `alternatives` for fitting. This is done by filtering each submodel and concatenating the results. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame """ ch = [] alts = [] for name, df in self._iter_groups(choosers): filtered_choosers, filtered_alts = \ self.models[name].apply_fit_filters(df, alternatives) ch.append(filtered_choosers) alts.append(filtered_alts) return pd.concat(ch), pd.concat(alts)
python
def apply_fit_filters(self, choosers, alternatives): """ Filter `choosers` and `alternatives` for fitting. This is done by filtering each submodel and concatenating the results. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame """ ch = [] alts = [] for name, df in self._iter_groups(choosers): filtered_choosers, filtered_alts = \ self.models[name].apply_fit_filters(df, alternatives) ch.append(filtered_choosers) alts.append(filtered_alts) return pd.concat(ch), pd.concat(alts)
[ "def", "apply_fit_filters", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "ch", "=", "[", "]", "alts", "=", "[", "]", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "filtered_choosers", ",", "filtered_alts", "=", "self", ".", "models", "[", "name", "]", ".", "apply_fit_filters", "(", "df", ",", "alternatives", ")", "ch", ".", "append", "(", "filtered_choosers", ")", "alts", ".", "append", "(", "filtered_alts", ")", "return", "pd", ".", "concat", "(", "ch", ")", ",", "pd", ".", "concat", "(", "alts", ")" ]
Filter `choosers` and `alternatives` for fitting. This is done by filtering each submodel and concatenating the results. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame
[ "Filter", "choosers", "and", "alternatives", "for", "fitting", ".", "This", "is", "done", "by", "filtering", "each", "submodel", "and", "concatenating", "the", "results", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L986-L1014
2,953
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.fit
def fit(self, choosers, alternatives, current_choice): """ Fit and save models based on given data after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-liklihood values as returned by MNLDiscreteChoiceModel.fit. """ with log_start_finish( 'fit models in LCM group {}'.format(self.name), logger): return { name: self.models[name].fit(df, alternatives, current_choice) for name, df in self._iter_groups(choosers)}
python
def fit(self, choosers, alternatives, current_choice): """ Fit and save models based on given data after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-liklihood values as returned by MNLDiscreteChoiceModel.fit. """ with log_start_finish( 'fit models in LCM group {}'.format(self.name), logger): return { name: self.models[name].fit(df, alternatives, current_choice) for name, df in self._iter_groups(choosers)}
[ "def", "fit", "(", "self", ",", "choosers", ",", "alternatives", ",", "current_choice", ")", ":", "with", "log_start_finish", "(", "'fit models in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ",", "logger", ")", ":", "return", "{", "name", ":", "self", ".", "models", "[", "name", "]", ".", "fit", "(", "df", ",", "alternatives", ",", "current_choice", ")", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", "}" ]
Fit and save models based on given data after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-likelihood values as returned by MNLDiscreteChoiceModel.fit.
[ "Fit", "and", "save", "models", "based", "on", "given", "data", "after", "segmenting", "the", "choosers", "table", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1049-L1078
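Continuing the group sketch above, fitting needs the segmentation column on the choosers plus a column naming each chooser's current alternative; everything here is toy data.

# Hedged sketch, reusing `group` from the earlier example; toy data only.
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
n_choosers, n_alts = 400, 25

choosers = pd.DataFrame({
    'income_quartile': rng.randint(1, 5, n_choosers),   # segment labels 1..4
    'building_id': rng.randint(0, n_alts, n_choosers),  # currently chosen alternative
})
alternatives = pd.DataFrame({
    'price': rng.uniform(1e5, 5e5, n_alts),
    'sqft_per_unit': rng.uniform(500, 2500, n_alts),
})

# Result keys are segment names; values are the log-likelihood dictionaries
# returned by MNLDiscreteChoiceModel.fit, as the docstring above describes.
log_likelihoods = group.fit(choosers, alternatives, current_choice='building_id')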
2,954
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.fitted
def fitted(self): """ Whether all models in the group have been fitted. """ return (all(m.fitted for m in self.models.values()) if self.models else False)
python
def fitted(self): """ Whether all models in the group have been fitted. """ return (all(m.fitted for m in self.models.values()) if self.models else False)
[ "def", "fitted", "(", "self", ")", ":", "return", "(", "all", "(", "m", ".", "fitted", "for", "m", "in", "self", ".", "models", ".", "values", "(", ")", ")", "if", "self", ".", "models", "else", "False", ")" ]
Whether all models in the group have been fitted.
[ "Whether", "all", "models", "in", "the", "group", "have", "been", "fitted", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1081-L1087
2,955
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.summed_probabilities
def summed_probabilities(self, choosers, alternatives): """ Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together. """ if len(alternatives) == 0 or len(choosers) == 0: return pd.Series() logger.debug( 'start: calculate summed probabilities in LCM group {}'.format( self.name)) probs = [] for name, df in self._iter_groups(choosers): probs.append( self.models[name].summed_probabilities(df, alternatives)) add = tz.curry(pd.Series.add, fill_value=0) probs = tz.reduce(add, probs) logger.debug( 'finish: calculate summed probabilities in LCM group {}'.format( self.name)) return probs
python
def summed_probabilities(self, choosers, alternatives): """ Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together. """ if len(alternatives) == 0 or len(choosers) == 0: return pd.Series() logger.debug( 'start: calculate summed probabilities in LCM group {}'.format( self.name)) probs = [] for name, df in self._iter_groups(choosers): probs.append( self.models[name].summed_probabilities(df, alternatives)) add = tz.curry(pd.Series.add, fill_value=0) probs = tz.reduce(add, probs) logger.debug( 'finish: calculate summed probabilities in LCM group {}'.format( self.name)) return probs
[ "def", "summed_probabilities", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "if", "len", "(", "alternatives", ")", "==", "0", "or", "len", "(", "choosers", ")", "==", "0", ":", "return", "pd", ".", "Series", "(", ")", "logger", ".", "debug", "(", "'start: calculate summed probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "probs", "=", "[", "]", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "probs", ".", "append", "(", "self", ".", "models", "[", "name", "]", ".", "summed_probabilities", "(", "df", ",", "alternatives", ")", ")", "add", "=", "tz", ".", "curry", "(", "pd", ".", "Series", ".", "add", ",", "fill_value", "=", "0", ")", "probs", "=", "tz", ".", "reduce", "(", "add", ",", "probs", ")", "logger", ".", "debug", "(", "'finish: calculate summed probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "probs" ]
Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together.
[ "Returns", "the", "sum", "of", "probabilities", "for", "alternatives", "across", "all", "chooser", "segments", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1119-L1156
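The reduction above adds one probability Series per segment, aligning on the alternative index and treating alternatives missing from a segment as zero. A generic sketch of the same pattern using only pandas and the standard library (the source uses toolz's curried equivalent):

# Hedged sketch of the add-with-fill_value reduction, without toolz.
import functools
import pandas as pd

seg_a = pd.Series({10: 0.2, 11: 0.5, 12: 0.3})  # probabilities from one segment
seg_b = pd.Series({11: 0.1, 12: 0.4, 13: 0.5})  # probabilities from another segment

add = functools.partial(pd.Series.add, fill_value=0)  # absent index labels count as 0
summed = functools.reduce(add, [seg_a, seg_b])
print(summed)  # labels 10..13 with element-wise sums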
2,956
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.from_yaml
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutally exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- SegmentedMNLDiscreteChoiceModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) default_model_expr = cfg['default_config']['model_expression'] seg = cls( cfg['segmentation_col'], cfg['sample_size'], cfg['probability_mode'], cfg['choice_mode'], cfg['choosers_fit_filters'], cfg['choosers_predict_filters'], cfg['alts_fit_filters'], cfg['alts_predict_filters'], cfg['interaction_predict_filters'], cfg['estimation_sample_size'], cfg['prediction_sample_size'], cfg['choice_column'], default_model_expr, cfg['remove_alts'], cfg['name']) if "models" not in cfg: cfg["models"] = {} for name, m in cfg['models'].items(): m['model_expression'] = m.get( 'model_expression', default_model_expr) m['sample_size'] = cfg['sample_size'] m['probability_mode'] = cfg['probability_mode'] m['choice_mode'] = cfg['choice_mode'] m['choosers_fit_filters'] = None m['choosers_predict_filters'] = None m['alts_fit_filters'] = None m['alts_predict_filters'] = None m['interaction_predict_filters'] = \ cfg['interaction_predict_filters'] m['estimation_sample_size'] = cfg['estimation_sample_size'] m['prediction_sample_size'] = cfg['prediction_sample_size'] m['choice_column'] = cfg['choice_column'] model = MNLDiscreteChoiceModel.from_yaml( yamlio.convert_to_yaml(m, None)) seg._group.add_model(model) logger.debug( 'loaded segmented LCM model {} from YAML'.format(seg.name)) return seg
python
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutally exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- SegmentedMNLDiscreteChoiceModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) default_model_expr = cfg['default_config']['model_expression'] seg = cls( cfg['segmentation_col'], cfg['sample_size'], cfg['probability_mode'], cfg['choice_mode'], cfg['choosers_fit_filters'], cfg['choosers_predict_filters'], cfg['alts_fit_filters'], cfg['alts_predict_filters'], cfg['interaction_predict_filters'], cfg['estimation_sample_size'], cfg['prediction_sample_size'], cfg['choice_column'], default_model_expr, cfg['remove_alts'], cfg['name']) if "models" not in cfg: cfg["models"] = {} for name, m in cfg['models'].items(): m['model_expression'] = m.get( 'model_expression', default_model_expr) m['sample_size'] = cfg['sample_size'] m['probability_mode'] = cfg['probability_mode'] m['choice_mode'] = cfg['choice_mode'] m['choosers_fit_filters'] = None m['choosers_predict_filters'] = None m['alts_fit_filters'] = None m['alts_predict_filters'] = None m['interaction_predict_filters'] = \ cfg['interaction_predict_filters'] m['estimation_sample_size'] = cfg['estimation_sample_size'] m['prediction_sample_size'] = cfg['prediction_sample_size'] m['choice_column'] = cfg['choice_column'] model = MNLDiscreteChoiceModel.from_yaml( yamlio.convert_to_yaml(m, None)) seg._group.add_model(model) logger.debug( 'loaded segmented LCM model {} from YAML'.format(seg.name)) return seg
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ")", ":", "cfg", "=", "yamlio", ".", "yaml_to_dict", "(", "yaml_str", ",", "str_or_buffer", ")", "default_model_expr", "=", "cfg", "[", "'default_config'", "]", "[", "'model_expression'", "]", "seg", "=", "cls", "(", "cfg", "[", "'segmentation_col'", "]", ",", "cfg", "[", "'sample_size'", "]", ",", "cfg", "[", "'probability_mode'", "]", ",", "cfg", "[", "'choice_mode'", "]", ",", "cfg", "[", "'choosers_fit_filters'", "]", ",", "cfg", "[", "'choosers_predict_filters'", "]", ",", "cfg", "[", "'alts_fit_filters'", "]", ",", "cfg", "[", "'alts_predict_filters'", "]", ",", "cfg", "[", "'interaction_predict_filters'", "]", ",", "cfg", "[", "'estimation_sample_size'", "]", ",", "cfg", "[", "'prediction_sample_size'", "]", ",", "cfg", "[", "'choice_column'", "]", ",", "default_model_expr", ",", "cfg", "[", "'remove_alts'", "]", ",", "cfg", "[", "'name'", "]", ")", "if", "\"models\"", "not", "in", "cfg", ":", "cfg", "[", "\"models\"", "]", "=", "{", "}", "for", "name", ",", "m", "in", "cfg", "[", "'models'", "]", ".", "items", "(", ")", ":", "m", "[", "'model_expression'", "]", "=", "m", ".", "get", "(", "'model_expression'", ",", "default_model_expr", ")", "m", "[", "'sample_size'", "]", "=", "cfg", "[", "'sample_size'", "]", "m", "[", "'probability_mode'", "]", "=", "cfg", "[", "'probability_mode'", "]", "m", "[", "'choice_mode'", "]", "=", "cfg", "[", "'choice_mode'", "]", "m", "[", "'choosers_fit_filters'", "]", "=", "None", "m", "[", "'choosers_predict_filters'", "]", "=", "None", "m", "[", "'alts_fit_filters'", "]", "=", "None", "m", "[", "'alts_predict_filters'", "]", "=", "None", "m", "[", "'interaction_predict_filters'", "]", "=", "cfg", "[", "'interaction_predict_filters'", "]", "m", "[", "'estimation_sample_size'", "]", "=", "cfg", "[", "'estimation_sample_size'", "]", "m", "[", "'prediction_sample_size'", "]", "=", "cfg", "[", "'prediction_sample_size'", "]", "m", "[", "'choice_column'", "]", "=", "cfg", "[", "'choice_column'", "]", "model", "=", "MNLDiscreteChoiceModel", ".", "from_yaml", "(", "yamlio", ".", "convert_to_yaml", "(", "m", ",", "None", ")", ")", "seg", ".", "_group", ".", "add_model", "(", "model", ")", "logger", ".", "debug", "(", "'loaded segmented LCM model {} from YAML'", ".", "format", "(", "seg", ".", "name", ")", ")", "return", "seg" ]
Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load the model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- SegmentedMNLDiscreteChoiceModel
[ "Create", "a", "SegmentedMNLDiscreteChoiceModel", "instance", "from", "a", "saved", "YAML", "configuration", ".", "Arguments", "are", "mutally", "exclusive", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1334-L1397
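A sketch of the configuration shape from_yaml expects, inferred from the keys the loader above reads; the actual values (name, column names, expression) are hypothetical. Every top-level key indexed in the code has to be present, since they are read with plain subscripting rather than .get().

# Hedged sketch: a minimal config containing every key the loader indexes into.
from urbansim.models.dcm import SegmentedMNLDiscreteChoiceModel

cfg = """
name: hlcm_segmented
segmentation_col: income_quartile
sample_size: 10
probability_mode: full_product
choice_mode: individual
choosers_fit_filters: null
choosers_predict_filters: null
alts_fit_filters: null
alts_predict_filters: null
interaction_predict_filters: null
estimation_sample_size: null
prediction_sample_size: null
choice_column: null
remove_alts: false
default_config:
    model_expression: price + sqft_per_unit
models: {}
"""

seg = SegmentedMNLDiscreteChoiceModel.from_yaml(yaml_str=cfg)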
2,957
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.add_segment
def add_segment(self, name, model_expression=None): """ Add a new segment with its own model expression. Parameters ---------- name Segment name. Must match a segment in the groupby of the data. model_expression : str or dict, optional A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. If not given the default model will be used, which must not be None. """ logger.debug('adding LCM model {} to segmented model {}'.format( name, self.name)) if not model_expression: if not self.default_model_expr: raise ValueError( 'No default model available, ' 'you must supply a model expression.') model_expression = self.default_model_expr # we'll take care of some of the filtering this side before # segmentation self._group.add_model_from_params( name=name, model_expression=model_expression, sample_size=self.sample_size, probability_mode=self.probability_mode, choice_mode=self.choice_mode, choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=self.interaction_predict_filters, estimation_sample_size=self.estimation_sample_size, choice_column=self.choice_column)
python
def add_segment(self, name, model_expression=None): """ Add a new segment with its own model expression. Parameters ---------- name Segment name. Must match a segment in the groupby of the data. model_expression : str or dict, optional A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. If not given the default model will be used, which must not be None. """ logger.debug('adding LCM model {} to segmented model {}'.format( name, self.name)) if not model_expression: if not self.default_model_expr: raise ValueError( 'No default model available, ' 'you must supply a model expression.') model_expression = self.default_model_expr # we'll take care of some of the filtering this side before # segmentation self._group.add_model_from_params( name=name, model_expression=model_expression, sample_size=self.sample_size, probability_mode=self.probability_mode, choice_mode=self.choice_mode, choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=self.interaction_predict_filters, estimation_sample_size=self.estimation_sample_size, choice_column=self.choice_column)
[ "def", "add_segment", "(", "self", ",", "name", ",", "model_expression", "=", "None", ")", ":", "logger", ".", "debug", "(", "'adding LCM model {} to segmented model {}'", ".", "format", "(", "name", ",", "self", ".", "name", ")", ")", "if", "not", "model_expression", ":", "if", "not", "self", ".", "default_model_expr", ":", "raise", "ValueError", "(", "'No default model available, '", "'you must supply a model expression.'", ")", "model_expression", "=", "self", ".", "default_model_expr", "# we'll take care of some of the filtering this side before", "# segmentation", "self", ".", "_group", ".", "add_model_from_params", "(", "name", "=", "name", ",", "model_expression", "=", "model_expression", ",", "sample_size", "=", "self", ".", "sample_size", ",", "probability_mode", "=", "self", ".", "probability_mode", ",", "choice_mode", "=", "self", ".", "choice_mode", ",", "choosers_fit_filters", "=", "None", ",", "choosers_predict_filters", "=", "None", ",", "alts_fit_filters", "=", "None", ",", "alts_predict_filters", "=", "None", ",", "interaction_predict_filters", "=", "self", ".", "interaction_predict_filters", ",", "estimation_sample_size", "=", "self", ".", "estimation_sample_size", ",", "choice_column", "=", "self", ".", "choice_column", ")" ]
Add a new segment with its own model expression. Parameters ---------- name Segment name. Must match a segment in the groupby of the data. model_expression : str or dict, optional A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. If not given the default model will be used, which must not be None.
[ "Add", "a", "new", "segment", "with", "its", "own", "model", "expression", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1399-L1437
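Continuing the from_yaml sketch above: explicitly registering segments, one with its own expression and one falling back to the default. The segment labels and expressions are illustrative.

# Hedged sketch, reusing `seg` from the from_yaml example above.
seg.add_segment(1, model_expression='sqft_per_unit')  # segment-specific expression
seg.add_segment(2)  # falls back to default_config's model_expression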
2,958
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.fit
def fit(self, choosers, alternatives, current_choice): """ Fit and save models based on given data after segmenting the `choosers` table. Segments that have not already been explicitly added will be automatically added with default model. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-liklihood values as returned by MNLDiscreteChoiceModel.fit. """ logger.debug('start: fit models in segmented LCM {}'.format(self.name)) choosers, alternatives = self.apply_fit_filters(choosers, alternatives) unique = choosers[self.segmentation_col].unique() # Remove any existing segments that may no longer have counterparts # in the data. This can happen when loading a saved model and then # calling this method with data that no longer has segments that # were there the last time this was called. gone = set(self._group.models) - set(unique) for g in gone: del self._group.models[g] for x in unique: if x not in self._group.models: self.add_segment(x) results = self._group.fit(choosers, alternatives, current_choice) logger.debug( 'finish: fit models in segmented LCM {}'.format(self.name)) return results
python
def fit(self, choosers, alternatives, current_choice): """ Fit and save models based on given data after segmenting the `choosers` table. Segments that have not already been explicitly added will be automatically added with default model. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-liklihood values as returned by MNLDiscreteChoiceModel.fit. """ logger.debug('start: fit models in segmented LCM {}'.format(self.name)) choosers, alternatives = self.apply_fit_filters(choosers, alternatives) unique = choosers[self.segmentation_col].unique() # Remove any existing segments that may no longer have counterparts # in the data. This can happen when loading a saved model and then # calling this method with data that no longer has segments that # were there the last time this was called. gone = set(self._group.models) - set(unique) for g in gone: del self._group.models[g] for x in unique: if x not in self._group.models: self.add_segment(x) results = self._group.fit(choosers, alternatives, current_choice) logger.debug( 'finish: fit models in segmented LCM {}'.format(self.name)) return results
[ "def", "fit", "(", "self", ",", "choosers", ",", "alternatives", ",", "current_choice", ")", ":", "logger", ".", "debug", "(", "'start: fit models in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "choosers", ",", "alternatives", "=", "self", ".", "apply_fit_filters", "(", "choosers", ",", "alternatives", ")", "unique", "=", "choosers", "[", "self", ".", "segmentation_col", "]", ".", "unique", "(", ")", "# Remove any existing segments that may no longer have counterparts", "# in the data. This can happen when loading a saved model and then", "# calling this method with data that no longer has segments that", "# were there the last time this was called.", "gone", "=", "set", "(", "self", ".", "_group", ".", "models", ")", "-", "set", "(", "unique", ")", "for", "g", "in", "gone", ":", "del", "self", ".", "_group", ".", "models", "[", "g", "]", "for", "x", "in", "unique", ":", "if", "x", "not", "in", "self", ".", "_group", ".", "models", ":", "self", ".", "add_segment", "(", "x", ")", "results", "=", "self", ".", "_group", ".", "fit", "(", "choosers", ",", "alternatives", ",", "current_choice", ")", "logger", ".", "debug", "(", "'finish: fit models in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "results" ]
Fit and save models based on given data after segmenting the `choosers` table. Segments that have not already been explicitly added will be automatically added with the default model. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-likelihood values as returned by MNLDiscreteChoiceModel.fit.
[ "Fit", "and", "save", "models", "based", "on", "given", "data", "after", "segmenting", "the", "choosers", "table", ".", "Segments", "that", "have", "not", "already", "been", "explicitly", "added", "will", "be", "automatically", "added", "with", "default", "model", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1480-L1526
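Continuing the same sketch, a toy fit call. Segments present in the choosers but not yet registered (3 and 4 here) are added automatically with the default expression, per the docstring above; all data is invented.

# Hedged sketch, reusing `seg` from the examples above; toy data only.
import numpy as np
import pandas as pd

rng = np.random.RandomState(1)
n_choosers, n_alts = 400, 30

choosers = pd.DataFrame({
    'income_quartile': rng.randint(1, 5, n_choosers),
    'building_id': rng.randint(0, n_alts, n_choosers),
})
alternatives = pd.DataFrame({
    'price': rng.uniform(1e5, 5e5, n_alts),
    'sqft_per_unit': rng.uniform(500, 2500, n_alts),
})

log_likelihoods = seg.fit(choosers, alternatives, current_choice='building_id')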
2,959
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel._filter_choosers_alts
def _filter_choosers_alts(self, choosers, alternatives): """ Apply filters to the choosers and alts tables. """ return ( util.apply_filter_query( choosers, self.choosers_predict_filters), util.apply_filter_query( alternatives, self.alts_predict_filters))
python
def _filter_choosers_alts(self, choosers, alternatives): """ Apply filters to the choosers and alts tables. """ return ( util.apply_filter_query( choosers, self.choosers_predict_filters), util.apply_filter_query( alternatives, self.alts_predict_filters))
[ "def", "_filter_choosers_alts", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "return", "(", "util", ".", "apply_filter_query", "(", "choosers", ",", "self", ".", "choosers_predict_filters", ")", ",", "util", ".", "apply_filter_query", "(", "alternatives", ",", "self", ".", "alts_predict_filters", ")", ")" ]
Apply filters to the choosers and alts tables.
[ "Apply", "filters", "to", "the", "choosers", "and", "alts", "tables", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1536-L1545
2,960
UDST/urbansim
scripts/cache_to_hdf5.py
cache_to_df
def cache_to_df(dir_path): """ Convert a directory of binary array data files to a Pandas DataFrame. Parameters ---------- dir_path : str """ table = {} for attrib in glob.glob(os.path.join(dir_path, '*')): attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib)) if attrib_ext == '.lf8': attrib_data = np.fromfile(attrib, np.float64) table[attrib_name] = attrib_data elif attrib_ext == '.lf4': attrib_data = np.fromfile(attrib, np.float32) table[attrib_name] = attrib_data elif attrib_ext == '.li2': attrib_data = np.fromfile(attrib, np.int16) table[attrib_name] = attrib_data elif attrib_ext == '.li4': attrib_data = np.fromfile(attrib, np.int32) table[attrib_name] = attrib_data elif attrib_ext == '.li8': attrib_data = np.fromfile(attrib, np.int64) table[attrib_name] = attrib_data elif attrib_ext == '.ib1': attrib_data = np.fromfile(attrib, np.bool_) table[attrib_name] = attrib_data elif attrib_ext.startswith('.iS'): length_string = int(attrib_ext[3:]) attrib_data = np.fromfile(attrib, ('a' + str(length_string))) table[attrib_name] = attrib_data else: print('Array {} is not a recognized data type'.format(attrib)) df = pd.DataFrame(table) return df
python
def cache_to_df(dir_path): """ Convert a directory of binary array data files to a Pandas DataFrame. Parameters ---------- dir_path : str """ table = {} for attrib in glob.glob(os.path.join(dir_path, '*')): attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib)) if attrib_ext == '.lf8': attrib_data = np.fromfile(attrib, np.float64) table[attrib_name] = attrib_data elif attrib_ext == '.lf4': attrib_data = np.fromfile(attrib, np.float32) table[attrib_name] = attrib_data elif attrib_ext == '.li2': attrib_data = np.fromfile(attrib, np.int16) table[attrib_name] = attrib_data elif attrib_ext == '.li4': attrib_data = np.fromfile(attrib, np.int32) table[attrib_name] = attrib_data elif attrib_ext == '.li8': attrib_data = np.fromfile(attrib, np.int64) table[attrib_name] = attrib_data elif attrib_ext == '.ib1': attrib_data = np.fromfile(attrib, np.bool_) table[attrib_name] = attrib_data elif attrib_ext.startswith('.iS'): length_string = int(attrib_ext[3:]) attrib_data = np.fromfile(attrib, ('a' + str(length_string))) table[attrib_name] = attrib_data else: print('Array {} is not a recognized data type'.format(attrib)) df = pd.DataFrame(table) return df
[ "def", "cache_to_df", "(", "dir_path", ")", ":", "table", "=", "{", "}", "for", "attrib", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "dir_path", ",", "'*'", ")", ")", ":", "attrib_name", ",", "attrib_ext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "attrib", ")", ")", "if", "attrib_ext", "==", "'.lf8'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "float64", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.lf4'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "float32", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.li2'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "int16", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.li4'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "int32", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.li8'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "int64", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.ib1'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "bool_", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", ".", "startswith", "(", "'.iS'", ")", ":", "length_string", "=", "int", "(", "attrib_ext", "[", "3", ":", "]", ")", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "(", "'a'", "+", "str", "(", "length_string", ")", ")", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "else", ":", "print", "(", "'Array {} is not a recognized data type'", ".", "format", "(", "attrib", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "table", ")", "return", "df" ]
Convert a directory of binary array data files to a Pandas DataFrame. Parameters ---------- dir_path : str
[ "Convert", "a", "directory", "of", "binary", "array", "data", "files", "to", "a", "Pandas", "DataFrame", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L14-L60
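The extension encodes the numpy dtype ('.lf8' is float64, '.li4' is int32, '.ib1' is bool, and so on). A self-contained sketch that writes two attribute arrays in that convention and reads the directory back with the function above; the attribute names are made up.

# Hedged sketch: build a tiny cache directory in the naming convention above.
import os
import tempfile
import numpy as np

dir_path = tempfile.mkdtemp()
np.array([1000.5, 2000.25, 3000.0], dtype=np.float64).tofile(
    os.path.join(dir_path, 'land_value.lf8'))   # '.lf8' -> float64
np.array([1, 2, 3], dtype=np.int32).tofile(
    os.path.join(dir_path, 'zone_id.li4'))      # '.li4' -> int32

df = cache_to_df(dir_path)  # three rows, columns land_value and zone_id
print(df.dtypes)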
2,961
UDST/urbansim
scripts/cache_to_hdf5.py
convert_dirs
def convert_dirs(base_dir, hdf_name, complib=None, complevel=0): """ Convert nested set of directories to """ print('Converting directories in {}'.format(base_dir)) dirs = glob.glob(os.path.join(base_dir, '*')) dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES} if not dirs: raise RuntimeError('No direcotries found matching known data.') store = pd.HDFStore( hdf_name, mode='w', complevel=complevel, complib=complib) for dirpath in dirs: dirname = os.path.basename(dirpath) print(dirname) df = cache_to_df(dirpath) if dirname == 'travel_data': keys = ['from_zone_id', 'to_zone_id'] elif dirname == 'annual_employment_control_totals': keys = ['sector_id', 'year', 'home_based_status'] elif dirname == 'annual_job_relocation_rates': keys = ['sector_id'] elif dirname == 'annual_household_control_totals': keys = ['year'] elif dirname == 'annual_household_relocation_rates': keys = ['age_of_head_max', 'age_of_head_min', 'income_min', 'income_max'] elif dirname == 'building_sqft_per_job': keys = ['zone_id', 'building_type_id'] elif dirname == 'counties': keys = ['county_id'] elif dirname == 'development_event_history': keys = ['building_id'] elif dirname == 'target_vacancies': keys = ['building_type_id', 'year'] else: keys = [dirname[:-1] + '_id'] if dirname != 'annual_household_relocation_rates': df = df.set_index(keys) for colname in df.columns: if df[colname].dtype == np.float64: df[colname] = df[colname].astype(np.float32) elif df[colname].dtype == np.int64: df[colname] = df[colname].astype(np.int32) else: df[colname] = df[colname] df.info() print(os.linesep) store.put(dirname, df) store.close()
python
def convert_dirs(base_dir, hdf_name, complib=None, complevel=0): """ Convert nested set of directories to """ print('Converting directories in {}'.format(base_dir)) dirs = glob.glob(os.path.join(base_dir, '*')) dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES} if not dirs: raise RuntimeError('No direcotries found matching known data.') store = pd.HDFStore( hdf_name, mode='w', complevel=complevel, complib=complib) for dirpath in dirs: dirname = os.path.basename(dirpath) print(dirname) df = cache_to_df(dirpath) if dirname == 'travel_data': keys = ['from_zone_id', 'to_zone_id'] elif dirname == 'annual_employment_control_totals': keys = ['sector_id', 'year', 'home_based_status'] elif dirname == 'annual_job_relocation_rates': keys = ['sector_id'] elif dirname == 'annual_household_control_totals': keys = ['year'] elif dirname == 'annual_household_relocation_rates': keys = ['age_of_head_max', 'age_of_head_min', 'income_min', 'income_max'] elif dirname == 'building_sqft_per_job': keys = ['zone_id', 'building_type_id'] elif dirname == 'counties': keys = ['county_id'] elif dirname == 'development_event_history': keys = ['building_id'] elif dirname == 'target_vacancies': keys = ['building_type_id', 'year'] else: keys = [dirname[:-1] + '_id'] if dirname != 'annual_household_relocation_rates': df = df.set_index(keys) for colname in df.columns: if df[colname].dtype == np.float64: df[colname] = df[colname].astype(np.float32) elif df[colname].dtype == np.int64: df[colname] = df[colname].astype(np.int32) else: df[colname] = df[colname] df.info() print(os.linesep) store.put(dirname, df) store.close()
[ "def", "convert_dirs", "(", "base_dir", ",", "hdf_name", ",", "complib", "=", "None", ",", "complevel", "=", "0", ")", ":", "print", "(", "'Converting directories in {}'", ".", "format", "(", "base_dir", ")", ")", "dirs", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'*'", ")", ")", "dirs", "=", "{", "d", "for", "d", "in", "dirs", "if", "os", ".", "path", ".", "basename", "(", "d", ")", "in", "DIRECTORIES", "}", "if", "not", "dirs", ":", "raise", "RuntimeError", "(", "'No direcotries found matching known data.'", ")", "store", "=", "pd", ".", "HDFStore", "(", "hdf_name", ",", "mode", "=", "'w'", ",", "complevel", "=", "complevel", ",", "complib", "=", "complib", ")", "for", "dirpath", "in", "dirs", ":", "dirname", "=", "os", ".", "path", ".", "basename", "(", "dirpath", ")", "print", "(", "dirname", ")", "df", "=", "cache_to_df", "(", "dirpath", ")", "if", "dirname", "==", "'travel_data'", ":", "keys", "=", "[", "'from_zone_id'", ",", "'to_zone_id'", "]", "elif", "dirname", "==", "'annual_employment_control_totals'", ":", "keys", "=", "[", "'sector_id'", ",", "'year'", ",", "'home_based_status'", "]", "elif", "dirname", "==", "'annual_job_relocation_rates'", ":", "keys", "=", "[", "'sector_id'", "]", "elif", "dirname", "==", "'annual_household_control_totals'", ":", "keys", "=", "[", "'year'", "]", "elif", "dirname", "==", "'annual_household_relocation_rates'", ":", "keys", "=", "[", "'age_of_head_max'", ",", "'age_of_head_min'", ",", "'income_min'", ",", "'income_max'", "]", "elif", "dirname", "==", "'building_sqft_per_job'", ":", "keys", "=", "[", "'zone_id'", ",", "'building_type_id'", "]", "elif", "dirname", "==", "'counties'", ":", "keys", "=", "[", "'county_id'", "]", "elif", "dirname", "==", "'development_event_history'", ":", "keys", "=", "[", "'building_id'", "]", "elif", "dirname", "==", "'target_vacancies'", ":", "keys", "=", "[", "'building_type_id'", ",", "'year'", "]", "else", ":", "keys", "=", "[", "dirname", "[", ":", "-", "1", "]", "+", "'_id'", "]", "if", "dirname", "!=", "'annual_household_relocation_rates'", ":", "df", "=", "df", ".", "set_index", "(", "keys", ")", "for", "colname", "in", "df", ".", "columns", ":", "if", "df", "[", "colname", "]", ".", "dtype", "==", "np", ".", "float64", ":", "df", "[", "colname", "]", "=", "df", "[", "colname", "]", ".", "astype", "(", "np", ".", "float32", ")", "elif", "df", "[", "colname", "]", ".", "dtype", "==", "np", ".", "int64", ":", "df", "[", "colname", "]", "=", "df", "[", "colname", "]", ".", "astype", "(", "np", ".", "int32", ")", "else", ":", "df", "[", "colname", "]", "=", "df", "[", "colname", "]", "df", ".", "info", "(", ")", "print", "(", "os", ".", "linesep", ")", "store", ".", "put", "(", "dirname", ",", "df", ")", "store", ".", "close", "(", ")" ]
Convert a nested set of directories to an HDF5 file.
[ "Convert", "nested", "set", "of", "directories", "to" ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L72-L130
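A one-line usage sketch; the cache path and output file name are placeholders, and complib/complevel are passed straight through to pandas.HDFStore.

# Hedged sketch: paths are placeholders for a real UrbanSim cache year directory.
convert_dirs('data/cache/2010', 'urbansim_data.h5', complib='zlib', complevel=1)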
2,962
UDST/urbansim
urbansim/utils/misc.py
get_run_number
def get_run_number(): """ Get a run number for this execution of the model system, for identifying the output hdf5 files). Returns ------- The integer number for this run of the model system. """ try: f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'r') num = int(f.read()) f.close() except Exception: num = 1 f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'w') f.write(str(num + 1)) f.close() return num
python
def get_run_number(): """ Get a run number for this execution of the model system, for identifying the output hdf5 files). Returns ------- The integer number for this run of the model system. """ try: f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'r') num = int(f.read()) f.close() except Exception: num = 1 f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'w') f.write(str(num + 1)) f.close() return num
[ "def", "get_run_number", "(", ")", ":", "try", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'DATA_HOME'", ",", "\".\"", ")", ",", "'RUNNUM'", ")", ",", "'r'", ")", "num", "=", "int", "(", "f", ".", "read", "(", ")", ")", "f", ".", "close", "(", ")", "except", "Exception", ":", "num", "=", "1", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'DATA_HOME'", ",", "\".\"", ")", ",", "'RUNNUM'", ")", ",", "'w'", ")", "f", ".", "write", "(", "str", "(", "num", "+", "1", ")", ")", "f", ".", "close", "(", ")", "return", "num" ]
Get a run number for this execution of the model system, for identifying the output hdf5 files. Returns ------- The integer number for this run of the model system.
[ "Get", "a", "run", "number", "for", "this", "execution", "of", "the", "model", "system", "for", "identifying", "the", "output", "hdf5", "files", ")", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L97-L115
2,963
UDST/urbansim
urbansim/utils/misc.py
compute_range
def compute_range(travel_data, attr, travel_time_attr, dist, agg=np.sum): """ Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation """ travel_data = travel_data.reset_index(level=1) travel_data = travel_data[travel_data[travel_time_attr] < dist] travel_data["attr"] = attr[travel_data.to_zone_id].values return travel_data.groupby(level=0).attr.apply(agg)
python
def compute_range(travel_data, attr, travel_time_attr, dist, agg=np.sum): """ Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation """ travel_data = travel_data.reset_index(level=1) travel_data = travel_data[travel_data[travel_time_attr] < dist] travel_data["attr"] = attr[travel_data.to_zone_id].values return travel_data.groupby(level=0).attr.apply(agg)
[ "def", "compute_range", "(", "travel_data", ",", "attr", ",", "travel_time_attr", ",", "dist", ",", "agg", "=", "np", ".", "sum", ")", ":", "travel_data", "=", "travel_data", ".", "reset_index", "(", "level", "=", "1", ")", "travel_data", "=", "travel_data", "[", "travel_data", "[", "travel_time_attr", "]", "<", "dist", "]", "travel_data", "[", "\"attr\"", "]", "=", "attr", "[", "travel_data", ".", "to_zone_id", "]", ".", "values", "return", "travel_data", ".", "groupby", "(", "level", "=", "0", ")", ".", "attr", ".", "apply", "(", "agg", ")" ]
Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation
[ "Compute", "a", "zone", "-", "based", "accessibility", "query", "using", "the", "urbansim", "format", "travel", "data", "dataframe", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L118-L142
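A minimal usage sketch for compute_range (not part of the record above): the zone ids, travel times and job counts are invented, and the import path is assumed from the record's path field.

import pandas as pd
from urbansim.utils.misc import compute_range  # assumed import path

# toy travel data in the urbansim format: (from_zone_id, to_zone_id) MultiIndex
idx = pd.MultiIndex.from_product([[1, 2], [1, 2]],
                                 names=["from_zone_id", "to_zone_id"])
travel_data = pd.DataFrame({"am_peak_travel_time": [5.0, 20.0, 10.0, 5.0]}, index=idx)

# attribute to aggregate, indexed by zone_id (e.g. jobs per zone)
jobs = pd.Series([100, 250], index=[1, 2])

# total jobs reachable within 15 minutes of each origin zone
accessible_jobs = compute_range(travel_data, jobs, "am_peak_travel_time", dist=15)
# for the toy numbers above: zone 1 -> 100, zone 2 -> 350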
2,964
UDST/urbansim
urbansim/utils/misc.py
reindex
def reindex(series1, series2): """ This reindexes the first series by the second series. This is an extremely common operation that does not appear to be in Pandas at this time. If anyone knows of an easier way to do this in Pandas, please inform the UrbanSim developers. The canonical example would be a parcel series which has an index which is parcel_ids and a value which you want to fetch, let's say it's land_area. Another dataset, let's say of buildings has a series which indicate the parcel_ids that the buildings are located on, but which does not have land_area. If you pass parcels.land_area as the first series and buildings.parcel_id as the second series, this function returns a series which is indexed by buildings and has land_area as values and can be added to the buildings dataset. In short, this is a join on to a different table using a foreign key stored in the current table, but with only one attribute rather than for a full dataset. This is very similar to the pandas "loc" function or "reindex" function, but neither of those functions return the series indexed on the current table. In both of those cases, the series would be indexed on the foreign table and would require a second step to change the index. """ # turns out the merge is much faster than the .loc below df = pd.merge(pd.DataFrame({"left": series2}), pd.DataFrame({"right": series1}), left_on="left", right_index=True, how="left") return df.right
python
def reindex(series1, series2): """ This reindexes the first series by the second series. This is an extremely common operation that does not appear to be in Pandas at this time. If anyone knows of an easier way to do this in Pandas, please inform the UrbanSim developers. The canonical example would be a parcel series which has an index which is parcel_ids and a value which you want to fetch, let's say it's land_area. Another dataset, let's say of buildings has a series which indicate the parcel_ids that the buildings are located on, but which does not have land_area. If you pass parcels.land_area as the first series and buildings.parcel_id as the second series, this function returns a series which is indexed by buildings and has land_area as values and can be added to the buildings dataset. In short, this is a join on to a different table using a foreign key stored in the current table, but with only one attribute rather than for a full dataset. This is very similar to the pandas "loc" function or "reindex" function, but neither of those functions return the series indexed on the current table. In both of those cases, the series would be indexed on the foreign table and would require a second step to change the index. """ # turns out the merge is much faster than the .loc below df = pd.merge(pd.DataFrame({"left": series2}), pd.DataFrame({"right": series1}), left_on="left", right_index=True, how="left") return df.right
[ "def", "reindex", "(", "series1", ",", "series2", ")", ":", "# turns out the merge is much faster than the .loc below", "df", "=", "pd", ".", "merge", "(", "pd", ".", "DataFrame", "(", "{", "\"left\"", ":", "series2", "}", ")", ",", "pd", ".", "DataFrame", "(", "{", "\"right\"", ":", "series1", "}", ")", ",", "left_on", "=", "\"left\"", ",", "right_index", "=", "True", ",", "how", "=", "\"left\"", ")", "return", "df", ".", "right" ]
This reindexes the first series by the second series. This is an extremely common operation that does not appear to be in Pandas at this time. If anyone knows of an easier way to do this in Pandas, please inform the UrbanSim developers. The canonical example would be a parcel series which has an index which is parcel_ids and a value which you want to fetch, let's say it's land_area. Another dataset, let's say of buildings has a series which indicate the parcel_ids that the buildings are located on, but which does not have land_area. If you pass parcels.land_area as the first series and buildings.parcel_id as the second series, this function returns a series which is indexed by buildings and has land_area as values and can be added to the buildings dataset. In short, this is a join on to a different table using a foreign key stored in the current table, but with only one attribute rather than for a full dataset. This is very similar to the pandas "loc" function or "reindex" function, but neither of those functions return the series indexed on the current table. In both of those cases, the series would be indexed on the foreign table and would require a second step to change the index.
[ "This", "reindexes", "the", "first", "series", "by", "the", "second", "series", ".", "This", "is", "an", "extremely", "common", "operation", "that", "does", "not", "appear", "to", "be", "in", "Pandas", "at", "this", "time", ".", "If", "anyone", "knows", "of", "an", "easier", "way", "to", "do", "this", "in", "Pandas", "please", "inform", "the", "UrbanSim", "developers", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L145-L177
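A short sketch of the canonical parcels/buildings example described in the docstring above; the ids and land areas are made up, and the import path is assumed from the record's path field.

import pandas as pd
from urbansim.utils.misc import reindex  # assumed import path

parcels = pd.DataFrame({"land_area": [5000.0, 12000.0]},
                       index=pd.Index([10, 11], name="parcel_id"))
buildings = pd.DataFrame({"parcel_id": [11, 10, 11]},
                         index=pd.Index([1, 2, 3], name="building_id"))

# land_area broadcast onto the buildings table via its parcel_id foreign key
buildings["land_area"] = reindex(parcels.land_area, buildings.parcel_id)
# buildings.land_area -> [12000.0, 5000.0, 12000.0], indexed by building_id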
2,965
UDST/urbansim
urbansim/utils/misc.py
df64bitto32bit
def df64bitto32bit(tbl): """ Convert a Pandas dataframe from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- tbl : The dataframe to convert Returns ------- The converted dataframe """ newtbl = pd.DataFrame(index=tbl.index) for colname in tbl.columns: newtbl[colname] = series64bitto32bit(tbl[colname]) return newtbl
python
def df64bitto32bit(tbl): """ Convert a Pandas dataframe from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- tbl : The dataframe to convert Returns ------- The converted dataframe """ newtbl = pd.DataFrame(index=tbl.index) for colname in tbl.columns: newtbl[colname] = series64bitto32bit(tbl[colname]) return newtbl
[ "def", "df64bitto32bit", "(", "tbl", ")", ":", "newtbl", "=", "pd", ".", "DataFrame", "(", "index", "=", "tbl", ".", "index", ")", "for", "colname", "in", "tbl", ".", "columns", ":", "newtbl", "[", "colname", "]", "=", "series64bitto32bit", "(", "tbl", "[", "colname", "]", ")", "return", "newtbl" ]
Convert a Pandas dataframe from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- tbl : The dataframe to convert Returns ------- The converted dataframe
[ "Convert", "a", "Pandas", "dataframe", "from", "64", "bit", "types", "to", "32", "bit", "types", "to", "save", "memory", "or", "disk", "space", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L320-L336
2,966
UDST/urbansim
urbansim/utils/misc.py
series64bitto32bit
def series64bitto32bit(s): """ Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series """ if s.dtype == np.float64: return s.astype('float32') elif s.dtype == np.int64: return s.astype('int32') return s
python
def series64bitto32bit(s): """ Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series """ if s.dtype == np.float64: return s.astype('float32') elif s.dtype == np.int64: return s.astype('int32') return s
[ "def", "series64bitto32bit", "(", "s", ")", ":", "if", "s", ".", "dtype", "==", "np", ".", "float64", ":", "return", "s", ".", "astype", "(", "'float32'", ")", "elif", "s", ".", "dtype", "==", "np", ".", "int64", ":", "return", "s", ".", "astype", "(", "'int32'", ")", "return", "s" ]
Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series
[ "Convert", "a", "Pandas", "series", "from", "64", "bit", "types", "to", "32", "bit", "types", "to", "save", "memory", "or", "disk", "space", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L339-L356
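A small sketch showing the effect of the two converters above (df64bitto32bit delegates to series64bitto32bit column by column); the table contents are illustrative only.

import numpy as np
import pandas as pd
from urbansim.utils.misc import df64bitto32bit  # assumed import path

tbl = pd.DataFrame({"units": np.array([1, 2, 3], dtype=np.int64),
                    "price": np.array([1.5, 2.5, 3.5], dtype=np.float64),
                    "name": ["a", "b", "c"]})  # non-numeric columns pass through unchanged
small = df64bitto32bit(tbl)
print(small.dtypes)  # units -> int32, price -> float32, name -> object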
2,967
UDST/urbansim
urbansim/utils/misc.py
pandasdfsummarytojson
def pandasdfsummarytojson(df, ndigits=3): """ Convert the result of a Parameters ---------- df : The result of a Pandas describe operation. ndigits : int, optional - The number of significant digits to round to. Returns ------- A json object which captures the describe. Keys are field names and values are dictionaries with all of the indexes returned by the Pandas describe. """ df = df.transpose() return {k: _pandassummarytojson(v, ndigits) for k, v in df.iterrows()}
python
def pandasdfsummarytojson(df, ndigits=3): """ Convert the result of a Parameters ---------- df : The result of a Pandas describe operation. ndigits : int, optional - The number of significant digits to round to. Returns ------- A json object which captures the describe. Keys are field names and values are dictionaries with all of the indexes returned by the Pandas describe. """ df = df.transpose() return {k: _pandassummarytojson(v, ndigits) for k, v in df.iterrows()}
[ "def", "pandasdfsummarytojson", "(", "df", ",", "ndigits", "=", "3", ")", ":", "df", "=", "df", ".", "transpose", "(", ")", "return", "{", "k", ":", "_pandassummarytojson", "(", "v", ",", "ndigits", ")", "for", "k", ",", "v", "in", "df", ".", "iterrows", "(", ")", "}" ]
Convert the result of a Parameters ---------- df : The result of a Pandas describe operation. ndigits : int, optional - The number of significant digits to round to. Returns ------- A json object which captures the describe. Keys are field names and values are dictionaries with all of the indexes returned by the Pandas describe.
[ "Convert", "the", "result", "of", "a" ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L363-L379
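The summary line of the docstring above is cut off in the source ("Convert the result of a"); per its Parameters section it refers to the result of a Pandas describe operation. A hedged call sketch with made-up data (the private _pandassummarytojson helper is assumed to live in the same module):

import pandas as pd
from urbansim.utils.misc import pandasdfsummarytojson  # assumed import path

df = pd.DataFrame({"land_area": [5000.0, 12000.0, 8000.0],
                   "far": [1.0, 2.5, 4.0]})
summary = pandasdfsummarytojson(df.describe(), ndigits=2)
# a dict keyed by column name, e.g. summary["far"] holds the rounded
# count/mean/std/min/quartiles/max produced by describe()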
2,968
UDST/urbansim
urbansim/utils/misc.py
column_map
def column_map(tables, columns): """ Take a list of tables and a list of column names and resolve which columns come from which table. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- col_map : dict Maps table names to lists of column names. """ if not columns: return {t.name: None for t in tables} columns = set(columns) colmap = {t.name: list(set(t.columns).intersection(columns)) for t in tables} foundcols = tz.reduce(lambda x, y: x.union(y), (set(v) for v in colmap.values())) if foundcols != columns: raise RuntimeError('Not all required columns were found. ' 'Missing: {}'.format(list(columns - foundcols))) return colmap
python
def column_map(tables, columns): """ Take a list of tables and a list of column names and resolve which columns come from which table. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- col_map : dict Maps table names to lists of column names. """ if not columns: return {t.name: None for t in tables} columns = set(columns) colmap = {t.name: list(set(t.columns).intersection(columns)) for t in tables} foundcols = tz.reduce(lambda x, y: x.union(y), (set(v) for v in colmap.values())) if foundcols != columns: raise RuntimeError('Not all required columns were found. ' 'Missing: {}'.format(list(columns - foundcols))) return colmap
[ "def", "column_map", "(", "tables", ",", "columns", ")", ":", "if", "not", "columns", ":", "return", "{", "t", ".", "name", ":", "None", "for", "t", "in", "tables", "}", "columns", "=", "set", "(", "columns", ")", "colmap", "=", "{", "t", ".", "name", ":", "list", "(", "set", "(", "t", ".", "columns", ")", ".", "intersection", "(", "columns", ")", ")", "for", "t", "in", "tables", "}", "foundcols", "=", "tz", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "union", "(", "y", ")", ",", "(", "set", "(", "v", ")", "for", "v", "in", "colmap", ".", "values", "(", ")", ")", ")", "if", "foundcols", "!=", "columns", ":", "raise", "RuntimeError", "(", "'Not all required columns were found. '", "'Missing: {}'", ".", "format", "(", "list", "(", "columns", "-", "foundcols", ")", ")", ")", "return", "colmap" ]
Take a list of tables and a list of column names and resolve which columns come from which table. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- col_map : dict Maps table names to lists of column names.
[ "Take", "a", "list", "of", "tables", "and", "a", "list", "of", "column", "names", "and", "resolve", "which", "columns", "come", "from", "which", "table", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L382-L410
2,969
UDST/urbansim
urbansim/utils/misc.py
column_list
def column_list(tables, columns): """ Take a list of tables and a list of column names and return the columns that are present in the tables. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- cols : list Lists of column names available in the tables. """ columns = set(columns) foundcols = tz.reduce(lambda x, y: x.union(y), (set(t.columns) for t in tables)) return list(columns.intersection(foundcols))
python
def column_list(tables, columns): """ Take a list of tables and a list of column names and return the columns that are present in the tables. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- cols : list Lists of column names available in the tables. """ columns = set(columns) foundcols = tz.reduce(lambda x, y: x.union(y), (set(t.columns) for t in tables)) return list(columns.intersection(foundcols))
[ "def", "column_list", "(", "tables", ",", "columns", ")", ":", "columns", "=", "set", "(", "columns", ")", "foundcols", "=", "tz", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "union", "(", "y", ")", ",", "(", "set", "(", "t", ".", "columns", ")", "for", "t", "in", "tables", ")", ")", "return", "list", "(", "columns", ".", "intersection", "(", "foundcols", ")", ")" ]
Take a list of tables and a list of column names and return the columns that are present in the tables. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- cols : list Lists of column names available in the tables.
[ "Take", "a", "list", "of", "tables", "and", "a", "list", "of", "column", "names", "and", "return", "the", "columns", "that", "are", "present", "in", "the", "tables", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L413-L434
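A combined sketch for the column_map and column_list helpers above: the namedtuple stand-ins provide only the .name and .columns attributes the docstrings say are required, and all table and column names are illustrative.

from collections import namedtuple
from urbansim.utils.misc import column_map, column_list  # assumed import path

Table = namedtuple("Table", ["name", "columns"])  # minimal stand-in for a wrapped table
tables = [Table("buildings", ["parcel_id", "residential_units"]),
          Table("parcels", ["land_area", "zone_id"])]

column_map(tables, ["residential_units", "land_area"])
# -> {'buildings': ['residential_units'], 'parcels': ['land_area']}
# requesting a column that no table has raises RuntimeError

column_list(tables, ["land_area", "not_a_real_column"])
# -> ['land_area']  (missing names are silently dropped instead of raising)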
2,970
UDST/urbansim
urbansim/utils/sampling.py
accounting_sample_replace
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50): """ Sample rows with accounting with replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly. """ # check for probabilities p = get_probs(data, prob_column) # determine avg number of accounting items per sample (e.g. persons per household) per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values)) curr_total = 0 remaining = total sample_rows = pd.DataFrame() closest = None closest_remain = total matched = False for i in range(0, max_iterations): # stop if we've hit the control if remaining == 0: matched = True break # if sampling with probabilities, re-caclc the # of items per sample # after the initial sample, this way the sample size reflects the probabilities if p is not None and i == 1: per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows)) # update the sample num_samples = int(math.ceil(math.fabs(remaining) / per_sample)) if remaining > 0: # we're short, add to the sample curr_ids = np.random.choice(data.index.values, num_samples, p=p) sample_rows = pd.concat([sample_rows, data.loc[curr_ids]]) else: # we've overshot, remove from existing samples (FIFO) sample_rows = sample_rows.iloc[num_samples:].copy() # update the total and check for the closest result curr_total = sample_rows[accounting_column].sum() remaining = total - curr_total if abs(remaining) < closest_remain: closest_remain = abs(remaining) closest = sample_rows return closest, matched
python
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50): """ Sample rows with accounting with replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly. """ # check for probabilities p = get_probs(data, prob_column) # determine avg number of accounting items per sample (e.g. persons per household) per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values)) curr_total = 0 remaining = total sample_rows = pd.DataFrame() closest = None closest_remain = total matched = False for i in range(0, max_iterations): # stop if we've hit the control if remaining == 0: matched = True break # if sampling with probabilities, re-caclc the # of items per sample # after the initial sample, this way the sample size reflects the probabilities if p is not None and i == 1: per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows)) # update the sample num_samples = int(math.ceil(math.fabs(remaining) / per_sample)) if remaining > 0: # we're short, add to the sample curr_ids = np.random.choice(data.index.values, num_samples, p=p) sample_rows = pd.concat([sample_rows, data.loc[curr_ids]]) else: # we've overshot, remove from existing samples (FIFO) sample_rows = sample_rows.iloc[num_samples:].copy() # update the total and check for the closest result curr_total = sample_rows[accounting_column].sum() remaining = total - curr_total if abs(remaining) < closest_remain: closest_remain = abs(remaining) closest = sample_rows return closest, matched
[ "def", "accounting_sample_replace", "(", "total", ",", "data", ",", "accounting_column", ",", "prob_column", "=", "None", ",", "max_iterations", "=", "50", ")", ":", "# check for probabilities", "p", "=", "get_probs", "(", "data", ",", "prob_column", ")", "# determine avg number of accounting items per sample (e.g. persons per household)", "per_sample", "=", "data", "[", "accounting_column", "]", ".", "sum", "(", ")", "/", "(", "1.0", "*", "len", "(", "data", ".", "index", ".", "values", ")", ")", "curr_total", "=", "0", "remaining", "=", "total", "sample_rows", "=", "pd", ".", "DataFrame", "(", ")", "closest", "=", "None", "closest_remain", "=", "total", "matched", "=", "False", "for", "i", "in", "range", "(", "0", ",", "max_iterations", ")", ":", "# stop if we've hit the control", "if", "remaining", "==", "0", ":", "matched", "=", "True", "break", "# if sampling with probabilities, re-caclc the # of items per sample", "# after the initial sample, this way the sample size reflects the probabilities", "if", "p", "is", "not", "None", "and", "i", "==", "1", ":", "per_sample", "=", "sample_rows", "[", "accounting_column", "]", ".", "sum", "(", ")", "/", "(", "1.0", "*", "len", "(", "sample_rows", ")", ")", "# update the sample", "num_samples", "=", "int", "(", "math", ".", "ceil", "(", "math", ".", "fabs", "(", "remaining", ")", "/", "per_sample", ")", ")", "if", "remaining", ">", "0", ":", "# we're short, add to the sample", "curr_ids", "=", "np", ".", "random", ".", "choice", "(", "data", ".", "index", ".", "values", ",", "num_samples", ",", "p", "=", "p", ")", "sample_rows", "=", "pd", ".", "concat", "(", "[", "sample_rows", ",", "data", ".", "loc", "[", "curr_ids", "]", "]", ")", "else", ":", "# we've overshot, remove from existing samples (FIFO)", "sample_rows", "=", "sample_rows", ".", "iloc", "[", "num_samples", ":", "]", ".", "copy", "(", ")", "# update the total and check for the closest result", "curr_total", "=", "sample_rows", "[", "accounting_column", "]", ".", "sum", "(", ")", "remaining", "=", "total", "-", "curr_total", "if", "abs", "(", "remaining", ")", "<", "closest_remain", ":", "closest_remain", "=", "abs", "(", "remaining", ")", "closest", "=", "sample_rows", "return", "closest", ",", "matched" ]
Sample rows with accounting with replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly.
[ "Sample", "rows", "with", "accounting", "with", "replacement", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L35-L105
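A hedged sketch of hitting a persons control total by sampling households with replacement; the data are invented, and get_probs is assumed to be a helper in the same sampling module that returns None when no probability column is given.

import numpy as np
import pandas as pd
from urbansim.utils.sampling import accounting_sample_replace  # assumed import path

np.random.seed(0)  # keep the illustrative draw repeatable
households = pd.DataFrame({"persons": [1, 2, 3, 4, 5]},
                          index=pd.Index([10, 11, 12, 13, 14], name="household_id"))

sample, matched = accounting_sample_replace(total=20, data=households,
                                            accounting_column="persons")
# sample.persons.sum() lands on, or as close as the iterations allow to, 20;
# matched reports whether the control was hit exactly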
2,971
UDST/urbansim
urbansim/utils/sampling.py
accounting_sample_no_replace
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None): """ Samples rows with accounting without replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly. """ # make sure this is even feasible if total > data[accounting_column].sum(): raise ValueError('Control total exceeds the available samples') # check for probabilities p = get_probs(data, prob_column) # shuffle the rows if p is None: # random shuffle shuff_idx = np.random.permutation(data.index.values) else: # weighted shuffle ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index) ran_p.sort_values(ascending=False) shuff_idx = ran_p.index.values # get the initial sample shuffle = data.loc[shuff_idx] csum = np.cumsum(shuffle[accounting_column].values) pos = np.searchsorted(csum, total, 'right') sample = shuffle.iloc[:pos] # refine the sample sample_idx = sample.index.values sample_total = sample[accounting_column].sum() shortage = total - sample_total matched = False for idx, row in shuffle.iloc[pos:].iterrows(): if shortage == 0: # we've matached matched = True break # add the current element if it doesnt exceed the total cnt = row[accounting_column] if cnt <= shortage: sample_idx = np.append(sample_idx, idx) shortage -= cnt return shuffle.loc[sample_idx].copy(), matched
python
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None): """ Samples rows with accounting without replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly. """ # make sure this is even feasible if total > data[accounting_column].sum(): raise ValueError('Control total exceeds the available samples') # check for probabilities p = get_probs(data, prob_column) # shuffle the rows if p is None: # random shuffle shuff_idx = np.random.permutation(data.index.values) else: # weighted shuffle ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index) ran_p.sort_values(ascending=False) shuff_idx = ran_p.index.values # get the initial sample shuffle = data.loc[shuff_idx] csum = np.cumsum(shuffle[accounting_column].values) pos = np.searchsorted(csum, total, 'right') sample = shuffle.iloc[:pos] # refine the sample sample_idx = sample.index.values sample_total = sample[accounting_column].sum() shortage = total - sample_total matched = False for idx, row in shuffle.iloc[pos:].iterrows(): if shortage == 0: # we've matached matched = True break # add the current element if it doesnt exceed the total cnt = row[accounting_column] if cnt <= shortage: sample_idx = np.append(sample_idx, idx) shortage -= cnt return shuffle.loc[sample_idx].copy(), matched
[ "def", "accounting_sample_no_replace", "(", "total", ",", "data", ",", "accounting_column", ",", "prob_column", "=", "None", ")", ":", "# make sure this is even feasible", "if", "total", ">", "data", "[", "accounting_column", "]", ".", "sum", "(", ")", ":", "raise", "ValueError", "(", "'Control total exceeds the available samples'", ")", "# check for probabilities", "p", "=", "get_probs", "(", "data", ",", "prob_column", ")", "# shuffle the rows", "if", "p", "is", "None", ":", "# random shuffle", "shuff_idx", "=", "np", ".", "random", ".", "permutation", "(", "data", ".", "index", ".", "values", ")", "else", ":", "# weighted shuffle", "ran_p", "=", "pd", ".", "Series", "(", "np", ".", "power", "(", "np", ".", "random", ".", "rand", "(", "len", "(", "p", ")", ")", ",", "1.0", "/", "p", ")", ",", "index", "=", "data", ".", "index", ")", "ran_p", ".", "sort_values", "(", "ascending", "=", "False", ")", "shuff_idx", "=", "ran_p", ".", "index", ".", "values", "# get the initial sample", "shuffle", "=", "data", ".", "loc", "[", "shuff_idx", "]", "csum", "=", "np", ".", "cumsum", "(", "shuffle", "[", "accounting_column", "]", ".", "values", ")", "pos", "=", "np", ".", "searchsorted", "(", "csum", ",", "total", ",", "'right'", ")", "sample", "=", "shuffle", ".", "iloc", "[", ":", "pos", "]", "# refine the sample", "sample_idx", "=", "sample", ".", "index", ".", "values", "sample_total", "=", "sample", "[", "accounting_column", "]", ".", "sum", "(", ")", "shortage", "=", "total", "-", "sample_total", "matched", "=", "False", "for", "idx", ",", "row", "in", "shuffle", ".", "iloc", "[", "pos", ":", "]", ".", "iterrows", "(", ")", ":", "if", "shortage", "==", "0", ":", "# we've matached", "matched", "=", "True", "break", "# add the current element if it doesnt exceed the total", "cnt", "=", "row", "[", "accounting_column", "]", "if", "cnt", "<=", "shortage", ":", "sample_idx", "=", "np", ".", "append", "(", "sample_idx", ",", "idx", ")", "shortage", "-=", "cnt", "return", "shuffle", ".", "loc", "[", "sample_idx", "]", ".", "copy", "(", ")", ",", "matched" ]
Samples rows with accounting without replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly.
[ "Samples", "rows", "with", "accounting", "without", "replacement", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L108-L172
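The without-replacement counterpart, again with invented data; note the up-front ValueError when the control total exceeds what the table can supply.

import pandas as pd
from urbansim.utils.sampling import accounting_sample_no_replace  # assumed import path

households = pd.DataFrame({"persons": [2, 3, 4, 5]},
                          index=pd.Index([1, 2, 3, 4], name="household_id"))

sample, matched = accounting_sample_no_replace(total=7, data=households,
                                               accounting_column="persons")
assert sample.persons.sum() <= 7  # the refinement loop never overshoots the control
# accounting_sample_no_replace(total=99, ...) would raise ValueError for this table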
2,972
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProFormaConfig._convert_types
def _convert_types(self): """ convert lists and dictionaries that are useful for users to np vectors that are usable by machines """ self.fars = np.array(self.fars) self.parking_rates = np.array([self.parking_rates[use] for use in self.uses]) self.res_ratios = {} assert len(self.uses) == len(self.residential_uses) for k, v in self.forms.items(): self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses]) # normalize if not already self.forms[k] /= self.forms[k].sum() self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum() self.costs = np.transpose(np.array([self.costs[use] for use in self.uses]))
python
def _convert_types(self): """ convert lists and dictionaries that are useful for users to np vectors that are usable by machines """ self.fars = np.array(self.fars) self.parking_rates = np.array([self.parking_rates[use] for use in self.uses]) self.res_ratios = {} assert len(self.uses) == len(self.residential_uses) for k, v in self.forms.items(): self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses]) # normalize if not already self.forms[k] /= self.forms[k].sum() self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum() self.costs = np.transpose(np.array([self.costs[use] for use in self.uses]))
[ "def", "_convert_types", "(", "self", ")", ":", "self", ".", "fars", "=", "np", ".", "array", "(", "self", ".", "fars", ")", "self", ".", "parking_rates", "=", "np", ".", "array", "(", "[", "self", ".", "parking_rates", "[", "use", "]", "for", "use", "in", "self", ".", "uses", "]", ")", "self", ".", "res_ratios", "=", "{", "}", "assert", "len", "(", "self", ".", "uses", ")", "==", "len", "(", "self", ".", "residential_uses", ")", "for", "k", ",", "v", "in", "self", ".", "forms", ".", "items", "(", ")", ":", "self", ".", "forms", "[", "k", "]", "=", "np", ".", "array", "(", "[", "self", ".", "forms", "[", "k", "]", ".", "get", "(", "use", ",", "0.0", ")", "for", "use", "in", "self", ".", "uses", "]", ")", "# normalize if not already", "self", ".", "forms", "[", "k", "]", "/=", "self", ".", "forms", "[", "k", "]", ".", "sum", "(", ")", "self", ".", "res_ratios", "[", "k", "]", "=", "pd", ".", "Series", "(", "self", ".", "forms", "[", "k", "]", ")", "[", "self", ".", "residential_uses", "]", ".", "sum", "(", ")", "self", ".", "costs", "=", "np", ".", "transpose", "(", "np", ".", "array", "(", "[", "self", ".", "costs", "[", "use", "]", "for", "use", "in", "self", ".", "uses", "]", ")", ")" ]
convert lists and dictionaries that are useful for users to np vectors that are usable by machines
[ "convert", "lists", "and", "dictionaries", "that", "are", "useful", "for", "users", "to", "np", "vectors", "that", "are", "usable", "by", "machines" ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L192-L207
2,973
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma._building_cost
def _building_cost(self, use_mix, stories): """ Generate building cost for a set of buildings Parameters ---------- use_mix : array The mix of uses for this form stories : series A Pandas Series of stories Returns ------- array The cost per sqft for this unit mix and height. """ c = self.config # stories to heights heights = stories * c.height_per_story # cost index for this height costs = np.searchsorted(c.heights_for_costs, heights) # this will get set to nan later costs[np.isnan(heights)] = 0 # compute cost with matrix multiply costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix) # some heights aren't allowed - cost should be nan costs[np.isnan(stories).flatten()] = np.nan return costs.flatten()
python
def _building_cost(self, use_mix, stories): """ Generate building cost for a set of buildings Parameters ---------- use_mix : array The mix of uses for this form stories : series A Pandas Series of stories Returns ------- array The cost per sqft for this unit mix and height. """ c = self.config # stories to heights heights = stories * c.height_per_story # cost index for this height costs = np.searchsorted(c.heights_for_costs, heights) # this will get set to nan later costs[np.isnan(heights)] = 0 # compute cost with matrix multiply costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix) # some heights aren't allowed - cost should be nan costs[np.isnan(stories).flatten()] = np.nan return costs.flatten()
[ "def", "_building_cost", "(", "self", ",", "use_mix", ",", "stories", ")", ":", "c", "=", "self", ".", "config", "# stories to heights", "heights", "=", "stories", "*", "c", ".", "height_per_story", "# cost index for this height", "costs", "=", "np", ".", "searchsorted", "(", "c", ".", "heights_for_costs", ",", "heights", ")", "# this will get set to nan later", "costs", "[", "np", ".", "isnan", "(", "heights", ")", "]", "=", "0", "# compute cost with matrix multiply", "costs", "=", "np", ".", "dot", "(", "np", ".", "squeeze", "(", "c", ".", "costs", "[", "costs", ".", "astype", "(", "'int32'", ")", "]", ")", ",", "use_mix", ")", "# some heights aren't allowed - cost should be nan", "costs", "[", "np", ".", "isnan", "(", "stories", ")", ".", "flatten", "(", ")", "]", "=", "np", ".", "nan", "return", "costs", ".", "flatten", "(", ")" ]
Generate building cost for a set of buildings Parameters ---------- use_mix : array The mix of uses for this form stories : series A Pandas Series of stories Returns ------- array The cost per sqft for this unit mix and height.
[ "Generate", "building", "cost", "for", "a", "set", "of", "buildings" ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L279-L307
2,974
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma._generate_lookup
def _generate_lookup(self): """ Run the developer model on all possible inputs specified in the configuration object - not generally called by the user. This part computes the final cost per sqft of the building to construct and then turns it into the yearly rent necessary to make break even on that cost. """ c = self.config # get all the building forms we can use keys = c.forms.keys() keys = sorted(keys) df_d = {} for name in keys: # get the use distribution for each uses_distrib = c.forms[name] for parking_config in c.parking_configs: # going to make a dataframe to store values to make # pro forma results transparent df = pd.DataFrame(index=c.fars) df['far'] = c.fars df['pclsz'] = c.tiled_parcel_sizes building_bulk = np.reshape( c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1)) building_bulk = np.reshape(building_bulk, (-1, 1)) # need to converge in on exactly how much far is available for # deck pkg if parking_config == 'deck': building_bulk /= (1.0 + np.sum(uses_distrib * c.parking_rates) * c.parking_sqft_d[parking_config] / c.sqft_per_rate) df['building_sqft'] = building_bulk parkingstalls = building_bulk * \ np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate parking_cost = (c.parking_cost_d[parking_config] * parkingstalls * c.parking_sqft_d[parking_config]) df['spaces'] = parkingstalls if parking_config == 'underground': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = building_bulk / c.tiled_parcel_sizes if parking_config == 'deck': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = ((building_bulk + parkingstalls * c.parking_sqft_d[parking_config]) / c.tiled_parcel_sizes) if parking_config == 'surface': stories = building_bulk / \ (c.tiled_parcel_sizes - parkingstalls * c.parking_sqft_d[parking_config]) df['park_sqft'] = 0 # not all fars support surface parking stories[stories < 0.0] = np.nan # I think we can assume that stories over 3 # do not work with surface parking stories[stories > 5.0] = np.nan df['total_built_sqft'] = df.building_sqft + df.park_sqft df['parking_sqft_ratio'] = df.park_sqft / df.total_built_sqft stories /= c.parcel_coverage df['stories'] = np.ceil(stories) df['height'] = df.stories * c.height_per_story df['build_cost_sqft'] = self._building_cost(uses_distrib, stories) df['build_cost'] = df.build_cost_sqft * df.building_sqft df['park_cost'] = parking_cost df['cost'] = df.build_cost + df.park_cost df['ave_cost_sqft'] = (df.cost / df.total_built_sqft) * c.profit_factor if name == 'retail': df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan if name == 'industrial': df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan df_d[(name, parking_config)] = df self.dev_d = df_d
python
def _generate_lookup(self): """ Run the developer model on all possible inputs specified in the configuration object - not generally called by the user. This part computes the final cost per sqft of the building to construct and then turns it into the yearly rent necessary to make break even on that cost. """ c = self.config # get all the building forms we can use keys = c.forms.keys() keys = sorted(keys) df_d = {} for name in keys: # get the use distribution for each uses_distrib = c.forms[name] for parking_config in c.parking_configs: # going to make a dataframe to store values to make # pro forma results transparent df = pd.DataFrame(index=c.fars) df['far'] = c.fars df['pclsz'] = c.tiled_parcel_sizes building_bulk = np.reshape( c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1)) building_bulk = np.reshape(building_bulk, (-1, 1)) # need to converge in on exactly how much far is available for # deck pkg if parking_config == 'deck': building_bulk /= (1.0 + np.sum(uses_distrib * c.parking_rates) * c.parking_sqft_d[parking_config] / c.sqft_per_rate) df['building_sqft'] = building_bulk parkingstalls = building_bulk * \ np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate parking_cost = (c.parking_cost_d[parking_config] * parkingstalls * c.parking_sqft_d[parking_config]) df['spaces'] = parkingstalls if parking_config == 'underground': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = building_bulk / c.tiled_parcel_sizes if parking_config == 'deck': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = ((building_bulk + parkingstalls * c.parking_sqft_d[parking_config]) / c.tiled_parcel_sizes) if parking_config == 'surface': stories = building_bulk / \ (c.tiled_parcel_sizes - parkingstalls * c.parking_sqft_d[parking_config]) df['park_sqft'] = 0 # not all fars support surface parking stories[stories < 0.0] = np.nan # I think we can assume that stories over 3 # do not work with surface parking stories[stories > 5.0] = np.nan df['total_built_sqft'] = df.building_sqft + df.park_sqft df['parking_sqft_ratio'] = df.park_sqft / df.total_built_sqft stories /= c.parcel_coverage df['stories'] = np.ceil(stories) df['height'] = df.stories * c.height_per_story df['build_cost_sqft'] = self._building_cost(uses_distrib, stories) df['build_cost'] = df.build_cost_sqft * df.building_sqft df['park_cost'] = parking_cost df['cost'] = df.build_cost + df.park_cost df['ave_cost_sqft'] = (df.cost / df.total_built_sqft) * c.profit_factor if name == 'retail': df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan if name == 'industrial': df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan df_d[(name, parking_config)] = df self.dev_d = df_d
[ "def", "_generate_lookup", "(", "self", ")", ":", "c", "=", "self", ".", "config", "# get all the building forms we can use", "keys", "=", "c", ".", "forms", ".", "keys", "(", ")", "keys", "=", "sorted", "(", "keys", ")", "df_d", "=", "{", "}", "for", "name", "in", "keys", ":", "# get the use distribution for each", "uses_distrib", "=", "c", ".", "forms", "[", "name", "]", "for", "parking_config", "in", "c", ".", "parking_configs", ":", "# going to make a dataframe to store values to make", "# pro forma results transparent", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "c", ".", "fars", ")", "df", "[", "'far'", "]", "=", "c", ".", "fars", "df", "[", "'pclsz'", "]", "=", "c", ".", "tiled_parcel_sizes", "building_bulk", "=", "np", ".", "reshape", "(", "c", ".", "parcel_sizes", ",", "(", "-", "1", ",", "1", ")", ")", "*", "np", ".", "reshape", "(", "c", ".", "fars", ",", "(", "1", ",", "-", "1", ")", ")", "building_bulk", "=", "np", ".", "reshape", "(", "building_bulk", ",", "(", "-", "1", ",", "1", ")", ")", "# need to converge in on exactly how much far is available for", "# deck pkg", "if", "parking_config", "==", "'deck'", ":", "building_bulk", "/=", "(", "1.0", "+", "np", ".", "sum", "(", "uses_distrib", "*", "c", ".", "parking_rates", ")", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", "/", "c", ".", "sqft_per_rate", ")", "df", "[", "'building_sqft'", "]", "=", "building_bulk", "parkingstalls", "=", "building_bulk", "*", "np", ".", "sum", "(", "uses_distrib", "*", "c", ".", "parking_rates", ")", "/", "c", ".", "sqft_per_rate", "parking_cost", "=", "(", "c", ".", "parking_cost_d", "[", "parking_config", "]", "*", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", ")", "df", "[", "'spaces'", "]", "=", "parkingstalls", "if", "parking_config", "==", "'underground'", ":", "df", "[", "'park_sqft'", "]", "=", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", "stories", "=", "building_bulk", "/", "c", ".", "tiled_parcel_sizes", "if", "parking_config", "==", "'deck'", ":", "df", "[", "'park_sqft'", "]", "=", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", "stories", "=", "(", "(", "building_bulk", "+", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", ")", "/", "c", ".", "tiled_parcel_sizes", ")", "if", "parking_config", "==", "'surface'", ":", "stories", "=", "building_bulk", "/", "(", "c", ".", "tiled_parcel_sizes", "-", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", ")", "df", "[", "'park_sqft'", "]", "=", "0", "# not all fars support surface parking", "stories", "[", "stories", "<", "0.0", "]", "=", "np", ".", "nan", "# I think we can assume that stories over 3", "# do not work with surface parking", "stories", "[", "stories", ">", "5.0", "]", "=", "np", ".", "nan", "df", "[", "'total_built_sqft'", "]", "=", "df", ".", "building_sqft", "+", "df", ".", "park_sqft", "df", "[", "'parking_sqft_ratio'", "]", "=", "df", ".", "park_sqft", "/", "df", ".", "total_built_sqft", "stories", "/=", "c", ".", "parcel_coverage", "df", "[", "'stories'", "]", "=", "np", ".", "ceil", "(", "stories", ")", "df", "[", "'height'", "]", "=", "df", ".", "stories", "*", "c", ".", "height_per_story", "df", "[", "'build_cost_sqft'", "]", "=", "self", ".", "_building_cost", "(", "uses_distrib", ",", "stories", ")", "df", "[", "'build_cost'", "]", "=", "df", ".", "build_cost_sqft", "*", "df", ".", "building_sqft", "df", "[", "'park_cost'", "]", "=", 
"parking_cost", "df", "[", "'cost'", "]", "=", "df", ".", "build_cost", "+", "df", ".", "park_cost", "df", "[", "'ave_cost_sqft'", "]", "=", "(", "df", ".", "cost", "/", "df", ".", "total_built_sqft", ")", "*", "c", ".", "profit_factor", "if", "name", "==", "'retail'", ":", "df", "[", "'ave_cost_sqft'", "]", "[", "c", ".", "fars", ">", "c", ".", "max_retail_height", "]", "=", "np", ".", "nan", "if", "name", "==", "'industrial'", ":", "df", "[", "'ave_cost_sqft'", "]", "[", "c", ".", "fars", ">", "c", ".", "max_industrial_height", "]", "=", "np", ".", "nan", "df_d", "[", "(", "name", ",", "parking_config", ")", "]", "=", "df", "self", ".", "dev_d", "=", "df_d" ]
Run the developer model on all possible inputs specified in the configuration object - not generally called by the user. This part computes the final cost per sqft of the building to construct and then turns it into the yearly rent necessary to make break even on that cost.
[ "Run", "the", "developer", "model", "on", "all", "possible", "inputs", "specified", "in", "the", "configuration", "object", "-", "not", "generally", "called", "by", "the", "user", ".", "This", "part", "computes", "the", "final", "cost", "per", "sqft", "of", "the", "building", "to", "construct", "and", "then", "turns", "it", "into", "the", "yearly", "rent", "necessary", "to", "make", "break", "even", "on", "that", "cost", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L309-L398
2,975
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma.lookup
def lookup(self, form, df, only_built=True, pass_through=None): """ This function does the developer model lookups for all the actual input data. Parameters ---------- form : string One of the forms specified in the configuration file df: dataframe Pass in a single data frame which is indexed by parcel_id and has the following columns only_built : bool Whether to return only those buildings that are profitable and allowed by zoning, or whether to return as much information as possible, even if unlikely to be built (can be used when development might be subsidized or when debugging) pass_through : list of strings List of field names to take from the input parcel frame and pass to the output feasibility frame - is usually used for debugging purposes - these fields will be passed all the way through developer Input Dataframe Columns rent : dataframe A set of columns, one for each of the uses passed in the configuration. Values are yearly rents for that use. Typical column names would be "residential", "retail", "industrial" and "office" land_cost : series A series representing the CURRENT yearly rent for each parcel. Used to compute acquisition costs for the parcel. parcel_size : series A series representing the parcel size for each parcel. max_far : series A series representing the maximum far allowed by zoning. Buildings will not be built above these fars. max_height : series A series representing the maxmium height allowed by zoning. Buildings will not be built above these heights. Will pick between the min of the far and height, will ignore on of them if one is nan, but will not build if both are nan. max_dua : series, optional A series representing the maximum dwelling units per acre allowed by zoning. If max_dua is passed, the average unit size should be passed below to translate from dua to floor space. ave_unit_size : series, optional This is required if max_dua is passed above, otherwise it is optional. This is the same as the parameter to Developer.pick() (it should be the same series). Returns ------- index : Series, int parcel identifiers building_sqft : Series, float The number of square feet for the building to build. Keep in mind this includes parking and common space. Will need a helpful function to convert from gross square feet to actual usable square feet in residential units. building_cost : Series, float The cost of constructing the building as given by the ave_cost_per_sqft from the cost model (for this FAR) and the number of square feet. total_cost : Series, float The cost of constructing the building plus the cost of acquisition of the current parcel/building. building_revenue : Series, float The NPV of the revenue for the building to be built, which is the number of square feet times the yearly rent divided by the cap rate (with a few adjustment factors including building efficiency). max_profit_far : Series, float The FAR of the maximum profit building (constrained by the max_far and max_height from the input dataframe). max_profit : The profit for the maximum profit building (constrained by the max_far and max_height from the input dataframe). 
""" df = pd.concat(self._lookup_parking_cfg(form, parking_config, df, only_built, pass_through) for parking_config in self.config.parking_configs) if len(df) == 0: return pd.DataFrame() max_profit_ind = df.pivot( columns="parking_config", values="max_profit").idxmax(axis=1).to_frame("parking_config") df.set_index(["parking_config"], append=True, inplace=True) max_profit_ind.set_index(["parking_config"], append=True, inplace=True) # get the max_profit idx return df.loc[max_profit_ind.index].reset_index(1)
python
def lookup(self, form, df, only_built=True, pass_through=None): """ This function does the developer model lookups for all the actual input data. Parameters ---------- form : string One of the forms specified in the configuration file df: dataframe Pass in a single data frame which is indexed by parcel_id and has the following columns only_built : bool Whether to return only those buildings that are profitable and allowed by zoning, or whether to return as much information as possible, even if unlikely to be built (can be used when development might be subsidized or when debugging) pass_through : list of strings List of field names to take from the input parcel frame and pass to the output feasibility frame - is usually used for debugging purposes - these fields will be passed all the way through developer Input Dataframe Columns rent : dataframe A set of columns, one for each of the uses passed in the configuration. Values are yearly rents for that use. Typical column names would be "residential", "retail", "industrial" and "office" land_cost : series A series representing the CURRENT yearly rent for each parcel. Used to compute acquisition costs for the parcel. parcel_size : series A series representing the parcel size for each parcel. max_far : series A series representing the maximum far allowed by zoning. Buildings will not be built above these fars. max_height : series A series representing the maxmium height allowed by zoning. Buildings will not be built above these heights. Will pick between the min of the far and height, will ignore on of them if one is nan, but will not build if both are nan. max_dua : series, optional A series representing the maximum dwelling units per acre allowed by zoning. If max_dua is passed, the average unit size should be passed below to translate from dua to floor space. ave_unit_size : series, optional This is required if max_dua is passed above, otherwise it is optional. This is the same as the parameter to Developer.pick() (it should be the same series). Returns ------- index : Series, int parcel identifiers building_sqft : Series, float The number of square feet for the building to build. Keep in mind this includes parking and common space. Will need a helpful function to convert from gross square feet to actual usable square feet in residential units. building_cost : Series, float The cost of constructing the building as given by the ave_cost_per_sqft from the cost model (for this FAR) and the number of square feet. total_cost : Series, float The cost of constructing the building plus the cost of acquisition of the current parcel/building. building_revenue : Series, float The NPV of the revenue for the building to be built, which is the number of square feet times the yearly rent divided by the cap rate (with a few adjustment factors including building efficiency). max_profit_far : Series, float The FAR of the maximum profit building (constrained by the max_far and max_height from the input dataframe). max_profit : The profit for the maximum profit building (constrained by the max_far and max_height from the input dataframe). 
""" df = pd.concat(self._lookup_parking_cfg(form, parking_config, df, only_built, pass_through) for parking_config in self.config.parking_configs) if len(df) == 0: return pd.DataFrame() max_profit_ind = df.pivot( columns="parking_config", values="max_profit").idxmax(axis=1).to_frame("parking_config") df.set_index(["parking_config"], append=True, inplace=True) max_profit_ind.set_index(["parking_config"], append=True, inplace=True) # get the max_profit idx return df.loc[max_profit_ind.index].reset_index(1)
[ "def", "lookup", "(", "self", ",", "form", ",", "df", ",", "only_built", "=", "True", ",", "pass_through", "=", "None", ")", ":", "df", "=", "pd", ".", "concat", "(", "self", ".", "_lookup_parking_cfg", "(", "form", ",", "parking_config", ",", "df", ",", "only_built", ",", "pass_through", ")", "for", "parking_config", "in", "self", ".", "config", ".", "parking_configs", ")", "if", "len", "(", "df", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", ")", "max_profit_ind", "=", "df", ".", "pivot", "(", "columns", "=", "\"parking_config\"", ",", "values", "=", "\"max_profit\"", ")", ".", "idxmax", "(", "axis", "=", "1", ")", ".", "to_frame", "(", "\"parking_config\"", ")", "df", ".", "set_index", "(", "[", "\"parking_config\"", "]", ",", "append", "=", "True", ",", "inplace", "=", "True", ")", "max_profit_ind", ".", "set_index", "(", "[", "\"parking_config\"", "]", ",", "append", "=", "True", ",", "inplace", "=", "True", ")", "# get the max_profit idx", "return", "df", ".", "loc", "[", "max_profit_ind", ".", "index", "]", ".", "reset_index", "(", "1", ")" ]
This function does the developer model lookups for all the actual input data. Parameters ---------- form : string One of the forms specified in the configuration file df: dataframe Pass in a single data frame which is indexed by parcel_id and has the following columns only_built : bool Whether to return only those buildings that are profitable and allowed by zoning, or whether to return as much information as possible, even if unlikely to be built (can be used when development might be subsidized or when debugging) pass_through : list of strings List of field names to take from the input parcel frame and pass to the output feasibility frame - is usually used for debugging purposes - these fields will be passed all the way through developer Input Dataframe Columns rent : dataframe A set of columns, one for each of the uses passed in the configuration. Values are yearly rents for that use. Typical column names would be "residential", "retail", "industrial" and "office" land_cost : series A series representing the CURRENT yearly rent for each parcel. Used to compute acquisition costs for the parcel. parcel_size : series A series representing the parcel size for each parcel. max_far : series A series representing the maximum far allowed by zoning. Buildings will not be built above these fars. max_height : series A series representing the maxmium height allowed by zoning. Buildings will not be built above these heights. Will pick between the min of the far and height, will ignore on of them if one is nan, but will not build if both are nan. max_dua : series, optional A series representing the maximum dwelling units per acre allowed by zoning. If max_dua is passed, the average unit size should be passed below to translate from dua to floor space. ave_unit_size : series, optional This is required if max_dua is passed above, otherwise it is optional. This is the same as the parameter to Developer.pick() (it should be the same series). Returns ------- index : Series, int parcel identifiers building_sqft : Series, float The number of square feet for the building to build. Keep in mind this includes parking and common space. Will need a helpful function to convert from gross square feet to actual usable square feet in residential units. building_cost : Series, float The cost of constructing the building as given by the ave_cost_per_sqft from the cost model (for this FAR) and the number of square feet. total_cost : Series, float The cost of constructing the building plus the cost of acquisition of the current parcel/building. building_revenue : Series, float The NPV of the revenue for the building to be built, which is the number of square feet times the yearly rent divided by the cap rate (with a few adjustment factors including building efficiency). max_profit_far : Series, float The FAR of the maximum profit building (constrained by the max_far and max_height from the input dataframe). max_profit : The profit for the maximum profit building (constrained by the max_far and max_height from the input dataframe).
[ "This", "function", "does", "the", "developer", "model", "lookups", "for", "all", "the", "actual", "input", "data", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L445-L537
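A minimal sketch of driving ``lookup``, assuming the default ``SqFtProFormaConfig`` (so the use columns and the "residential" form below exist) and an assumed no-argument ``SqFtProForma()`` constructor; all parcel values are invented for illustration.

import pandas as pd
from urbansim.developer import sqftproforma

# Assumed: the default configuration is acceptable and the constructor
# takes no required arguments.
pf = sqftproforma.SqFtProForma()

# One row per parcel; the rent columns must match the uses in the config.
parcels = pd.DataFrame({
    'residential': [22.0, 28.0],   # yearly rent per sqft by use
    'office': [25.0, 30.0],
    'retail': [20.0, 24.0],
    'industrial': [12.0, 14.0],
    'land_cost': [500000.0, 900000.0],
    'parcel_size': [10000.0, 20000.0],
    'max_far': [2.0, 4.0],
    'max_height': [40.0, 80.0],
}, index=pd.Index([101, 102], name='parcel_id'))

feasibility = pf.lookup('residential', parcels, only_built=True)
# `feasibility` is indexed by parcel_id and carries building_sqft,
# building_cost, total_cost, building_revenue, max_profit_far, max_profit.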
2,976
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma._debug_output
def _debug_output(self): """ this code creates the debugging plots to understand the behavior of the hypothetical building model """ import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt c = self.config df_d = self.dev_d keys = df_d.keys() keys = sorted(keys) for key in keys: logger.debug("\n" + str(key) + "\n") logger.debug(df_d[key]) for form in self.config.forms: logger.debug("\n" + str(key) + "\n") logger.debug(self.get_ave_cost_sqft(form, "surface")) keys = c.forms.keys() keys = sorted(keys) cnt = 1 share = None fig = plt.figure(figsize=(12, 3 * len(keys))) fig.suptitle('Profitable rents by use', fontsize=40) for name in keys: sumdf = None for parking_config in c.parking_configs: df = df_d[(name, parking_config)] if sumdf is None: sumdf = pd.DataFrame(df['far']) sumdf[parking_config] = df['ave_cost_sqft'] far = sumdf['far'] del sumdf['far'] if share is None: share = plt.subplot(len(keys) / 2, 2, cnt) else: plt.subplot(len(keys) / 2, 2, cnt, sharex=share, sharey=share) handles = plt.plot(far, sumdf) plt.ylabel('even_rent') plt.xlabel('FAR') plt.title('Rents for use type %s' % name) plt.legend( handles, c.parking_configs, loc='lower right', title='Parking type') cnt += 1 plt.savefig('even_rents.png', bbox_inches=0)
python
def _debug_output(self): """ this code creates the debugging plots to understand the behavior of the hypothetical building model """ import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt c = self.config df_d = self.dev_d keys = df_d.keys() keys = sorted(keys) for key in keys: logger.debug("\n" + str(key) + "\n") logger.debug(df_d[key]) for form in self.config.forms: logger.debug("\n" + str(key) + "\n") logger.debug(self.get_ave_cost_sqft(form, "surface")) keys = c.forms.keys() keys = sorted(keys) cnt = 1 share = None fig = plt.figure(figsize=(12, 3 * len(keys))) fig.suptitle('Profitable rents by use', fontsize=40) for name in keys: sumdf = None for parking_config in c.parking_configs: df = df_d[(name, parking_config)] if sumdf is None: sumdf = pd.DataFrame(df['far']) sumdf[parking_config] = df['ave_cost_sqft'] far = sumdf['far'] del sumdf['far'] if share is None: share = plt.subplot(len(keys) / 2, 2, cnt) else: plt.subplot(len(keys) / 2, 2, cnt, sharex=share, sharey=share) handles = plt.plot(far, sumdf) plt.ylabel('even_rent') plt.xlabel('FAR') plt.title('Rents for use type %s' % name) plt.legend( handles, c.parking_configs, loc='lower right', title='Parking type') cnt += 1 plt.savefig('even_rents.png', bbox_inches=0)
[ "def", "_debug_output", "(", "self", ")", ":", "import", "matplotlib", "matplotlib", ".", "use", "(", "'Agg'", ")", "import", "matplotlib", ".", "pyplot", "as", "plt", "c", "=", "self", ".", "config", "df_d", "=", "self", ".", "dev_d", "keys", "=", "df_d", ".", "keys", "(", ")", "keys", "=", "sorted", "(", "keys", ")", "for", "key", "in", "keys", ":", "logger", ".", "debug", "(", "\"\\n\"", "+", "str", "(", "key", ")", "+", "\"\\n\"", ")", "logger", ".", "debug", "(", "df_d", "[", "key", "]", ")", "for", "form", "in", "self", ".", "config", ".", "forms", ":", "logger", ".", "debug", "(", "\"\\n\"", "+", "str", "(", "key", ")", "+", "\"\\n\"", ")", "logger", ".", "debug", "(", "self", ".", "get_ave_cost_sqft", "(", "form", ",", "\"surface\"", ")", ")", "keys", "=", "c", ".", "forms", ".", "keys", "(", ")", "keys", "=", "sorted", "(", "keys", ")", "cnt", "=", "1", "share", "=", "None", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "12", ",", "3", "*", "len", "(", "keys", ")", ")", ")", "fig", ".", "suptitle", "(", "'Profitable rents by use'", ",", "fontsize", "=", "40", ")", "for", "name", "in", "keys", ":", "sumdf", "=", "None", "for", "parking_config", "in", "c", ".", "parking_configs", ":", "df", "=", "df_d", "[", "(", "name", ",", "parking_config", ")", "]", "if", "sumdf", "is", "None", ":", "sumdf", "=", "pd", ".", "DataFrame", "(", "df", "[", "'far'", "]", ")", "sumdf", "[", "parking_config", "]", "=", "df", "[", "'ave_cost_sqft'", "]", "far", "=", "sumdf", "[", "'far'", "]", "del", "sumdf", "[", "'far'", "]", "if", "share", "is", "None", ":", "share", "=", "plt", ".", "subplot", "(", "len", "(", "keys", ")", "/", "2", ",", "2", ",", "cnt", ")", "else", ":", "plt", ".", "subplot", "(", "len", "(", "keys", ")", "/", "2", ",", "2", ",", "cnt", ",", "sharex", "=", "share", ",", "sharey", "=", "share", ")", "handles", "=", "plt", ".", "plot", "(", "far", ",", "sumdf", ")", "plt", ".", "ylabel", "(", "'even_rent'", ")", "plt", ".", "xlabel", "(", "'FAR'", ")", "plt", ".", "title", "(", "'Rents for use type %s'", "%", "name", ")", "plt", ".", "legend", "(", "handles", ",", "c", ".", "parking_configs", ",", "loc", "=", "'lower right'", ",", "title", "=", "'Parking type'", ")", "cnt", "+=", "1", "plt", ".", "savefig", "(", "'even_rents.png'", ",", "bbox_inches", "=", "0", ")" ]
this code creates the debugging plots to understand the behavior of the hypothetical building model
[ "this", "code", "creates", "the", "debugging", "plots", "to", "understand", "the", "behavior", "of", "the", "hypothetical", "building", "model" ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L666-L716
2,977
UDST/urbansim
urbansim/models/transition.py
add_rows
def add_rows(data, nrows, starting_index=None, accounting_column=None): """ Add rows to data table according to a given nrows. New rows will have their IDs set to NaN. Parameters ---------- data : pandas.DataFrame nrows : int Number of rows to add. starting_index : int, optional The starting index from which to calculate indexes for the new rows. If not given the max + 1 of the index of `data` will be used. accounting_column: string, optional Name of column with accounting totals/quanties to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with rows added. New rows will have their index values set to NaN. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. """ logger.debug('start: adding {} rows in transition model'.format(nrows)) if nrows == 0: return data, _empty_index(), _empty_index() if not starting_index: starting_index = data.index.values.max() + 1 new_rows = sample_rows(nrows, data, accounting_column=accounting_column) copied_index = new_rows.index added_index = pd.Index(np.arange( starting_index, starting_index + len(new_rows.index), dtype=np.int)) new_rows.index = added_index logger.debug( 'finish: added {} rows in transition model'.format(len(new_rows))) return pd.concat([data, new_rows]), added_index, copied_index
python
def add_rows(data, nrows, starting_index=None, accounting_column=None): """ Add rows to data table according to a given nrows. New rows will have their IDs set to NaN. Parameters ---------- data : pandas.DataFrame nrows : int Number of rows to add. starting_index : int, optional The starting index from which to calculate indexes for the new rows. If not given the max + 1 of the index of `data` will be used. accounting_column: string, optional Name of column with accounting totals/quanties to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with rows added. New rows will have their index values set to NaN. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. """ logger.debug('start: adding {} rows in transition model'.format(nrows)) if nrows == 0: return data, _empty_index(), _empty_index() if not starting_index: starting_index = data.index.values.max() + 1 new_rows = sample_rows(nrows, data, accounting_column=accounting_column) copied_index = new_rows.index added_index = pd.Index(np.arange( starting_index, starting_index + len(new_rows.index), dtype=np.int)) new_rows.index = added_index logger.debug( 'finish: added {} rows in transition model'.format(len(new_rows))) return pd.concat([data, new_rows]), added_index, copied_index
[ "def", "add_rows", "(", "data", ",", "nrows", ",", "starting_index", "=", "None", ",", "accounting_column", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: adding {} rows in transition model'", ".", "format", "(", "nrows", ")", ")", "if", "nrows", "==", "0", ":", "return", "data", ",", "_empty_index", "(", ")", ",", "_empty_index", "(", ")", "if", "not", "starting_index", ":", "starting_index", "=", "data", ".", "index", ".", "values", ".", "max", "(", ")", "+", "1", "new_rows", "=", "sample_rows", "(", "nrows", ",", "data", ",", "accounting_column", "=", "accounting_column", ")", "copied_index", "=", "new_rows", ".", "index", "added_index", "=", "pd", ".", "Index", "(", "np", ".", "arange", "(", "starting_index", ",", "starting_index", "+", "len", "(", "new_rows", ".", "index", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "new_rows", ".", "index", "=", "added_index", "logger", ".", "debug", "(", "'finish: added {} rows in transition model'", ".", "format", "(", "len", "(", "new_rows", ")", ")", ")", "return", "pd", ".", "concat", "(", "[", "data", ",", "new_rows", "]", ")", ",", "added_index", ",", "copied_index" ]
Add rows to data table according to a given nrows. New rows will have their IDs set to NaN.

Parameters
----------
data : pandas.DataFrame
nrows : int
    Number of rows to add.
starting_index : int, optional
    The starting index from which to calculate indexes for the new rows.
    If not given the max + 1 of the index of `data` will be used.
accounting_column: string, optional
    Name of column with accounting totals/quantities to apply towards the
    control. If not provided then row counts will be used for accounting.

Returns
-------
updated : pandas.DataFrame
    Table with rows added. New rows will have their index values set to NaN.
added : pandas.Index
    New indexes of the rows that were added.
copied : pandas.Index
    Indexes of rows that were copied. A row copied multiple times will have
    multiple entries.
[ "Add", "rows", "to", "data", "table", "according", "to", "a", "given", "nrows", ".", "New", "rows", "will", "have", "their", "IDs", "set", "to", "NaN", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L24-L68
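A small usage sketch for ``add_rows``; the households table below is invented for illustration.

import pandas as pd
from urbansim.models.transition import add_rows

households = pd.DataFrame(
    {'persons': [1, 2, 4], 'income': [30000, 52000, 87000]},
    index=pd.Index([10, 11, 12], name='household_id'))

updated, added, copied = add_rows(households, nrows=2)
# `updated` now has five rows: the originals plus two rows sampled from
# the existing households and re-indexed from max(index) + 1 (13 and 14).
# `copied` records which original rows were duplicated.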
2,978
UDST/urbansim
urbansim/models/transition.py
remove_rows
def remove_rows(data, nrows, accounting_column=None): """ Remove a random `nrows` number of rows from a table. Parameters ---------- data : DataFrame nrows : float Number of rows to remove. accounting_column: string, optional Name of column with accounting totals/quanties to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with random rows removed. removed : pandas.Index Indexes of the rows removed from the table. """ logger.debug('start: removing {} rows in transition model'.format(nrows)) nrows = abs(nrows) # in case a negative number came in unit_check = data[accounting_column].sum() if accounting_column else len(data) if nrows == 0: return data, _empty_index() elif nrows > unit_check: raise ValueError('Number of rows to remove exceeds number of records in table.') remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False) remove_index = remove_rows.index logger.debug('finish: removed {} rows in transition model'.format(nrows)) return data.loc[data.index.difference(remove_index)], remove_index
python
def remove_rows(data, nrows, accounting_column=None): """ Remove a random `nrows` number of rows from a table. Parameters ---------- data : DataFrame nrows : float Number of rows to remove. accounting_column: string, optional Name of column with accounting totals/quanties to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with random rows removed. removed : pandas.Index Indexes of the rows removed from the table. """ logger.debug('start: removing {} rows in transition model'.format(nrows)) nrows = abs(nrows) # in case a negative number came in unit_check = data[accounting_column].sum() if accounting_column else len(data) if nrows == 0: return data, _empty_index() elif nrows > unit_check: raise ValueError('Number of rows to remove exceeds number of records in table.') remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False) remove_index = remove_rows.index logger.debug('finish: removed {} rows in transition model'.format(nrows)) return data.loc[data.index.difference(remove_index)], remove_index
[ "def", "remove_rows", "(", "data", ",", "nrows", ",", "accounting_column", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: removing {} rows in transition model'", ".", "format", "(", "nrows", ")", ")", "nrows", "=", "abs", "(", "nrows", ")", "# in case a negative number came in", "unit_check", "=", "data", "[", "accounting_column", "]", ".", "sum", "(", ")", "if", "accounting_column", "else", "len", "(", "data", ")", "if", "nrows", "==", "0", ":", "return", "data", ",", "_empty_index", "(", ")", "elif", "nrows", ">", "unit_check", ":", "raise", "ValueError", "(", "'Number of rows to remove exceeds number of records in table.'", ")", "remove_rows", "=", "sample_rows", "(", "nrows", ",", "data", ",", "accounting_column", "=", "accounting_column", ",", "replace", "=", "False", ")", "remove_index", "=", "remove_rows", ".", "index", "logger", ".", "debug", "(", "'finish: removed {} rows in transition model'", ".", "format", "(", "nrows", ")", ")", "return", "data", ".", "loc", "[", "data", ".", "index", ".", "difference", "(", "remove_index", ")", "]", ",", "remove_index" ]
Remove a random `nrows` number of rows from a table.

Parameters
----------
data : DataFrame
nrows : float
    Number of rows to remove.
accounting_column: string, optional
    Name of column with accounting totals/quantities to apply towards the
    control. If not provided then row counts will be used for accounting.

Returns
-------
updated : pandas.DataFrame
    Table with random rows removed.
removed : pandas.Index
    Indexes of the rows removed from the table.
[ "Remove", "a", "random", "nrows", "number", "of", "rows", "from", "a", "table", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L71-L104
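The complementary call, again sketched on an invented table.

import pandas as pd
from urbansim.models.transition import remove_rows

households = pd.DataFrame(
    {'persons': [1, 2, 4]},
    index=pd.Index([10, 11, 12], name='household_id'))

updated, removed = remove_rows(households, nrows=1)
# One randomly chosen row is dropped and `removed` holds its index value.
# With an accounting_column, nrows is interpreted as units of that column
# rather than a row count.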
2,979
UDST/urbansim
urbansim/models/transition.py
_update_linked_table
def _update_linked_table(table, col_name, added, copied, removed): """ Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. Returns ------- updated : pandas.DataFrame """ logger.debug('start: update linked table after transition') # handle removals table = table.loc[~table[col_name].isin(set(removed))] if (added is None or len(added) == 0): return table # map new IDs to the IDs from which they were copied id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1) # join to linked table and assign new id new_rows = id_map.merge(table, on=col_name) new_rows.drop(col_name, axis=1, inplace=True) new_rows.rename(columns={'temp_id': col_name}, inplace=True) # index the new rows starting_index = table.index.values.max() + 1 new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int) logger.debug('finish: update linked table after transition') return pd.concat([table, new_rows])
python
def _update_linked_table(table, col_name, added, copied, removed): """ Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. Returns ------- updated : pandas.DataFrame """ logger.debug('start: update linked table after transition') # handle removals table = table.loc[~table[col_name].isin(set(removed))] if (added is None or len(added) == 0): return table # map new IDs to the IDs from which they were copied id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1) # join to linked table and assign new id new_rows = id_map.merge(table, on=col_name) new_rows.drop(col_name, axis=1, inplace=True) new_rows.rename(columns={'temp_id': col_name}, inplace=True) # index the new rows starting_index = table.index.values.max() + 1 new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int) logger.debug('finish: update linked table after transition') return pd.concat([table, new_rows])
[ "def", "_update_linked_table", "(", "table", ",", "col_name", ",", "added", ",", "copied", ",", "removed", ")", ":", "logger", ".", "debug", "(", "'start: update linked table after transition'", ")", "# handle removals", "table", "=", "table", ".", "loc", "[", "~", "table", "[", "col_name", "]", ".", "isin", "(", "set", "(", "removed", ")", ")", "]", "if", "(", "added", "is", "None", "or", "len", "(", "added", ")", "==", "0", ")", ":", "return", "table", "# map new IDs to the IDs from which they were copied", "id_map", "=", "pd", ".", "concat", "(", "[", "pd", ".", "Series", "(", "copied", ",", "name", "=", "col_name", ")", ",", "pd", ".", "Series", "(", "added", ",", "name", "=", "'temp_id'", ")", "]", ",", "axis", "=", "1", ")", "# join to linked table and assign new id", "new_rows", "=", "id_map", ".", "merge", "(", "table", ",", "on", "=", "col_name", ")", "new_rows", ".", "drop", "(", "col_name", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "new_rows", ".", "rename", "(", "columns", "=", "{", "'temp_id'", ":", "col_name", "}", ",", "inplace", "=", "True", ")", "# index the new rows", "starting_index", "=", "table", ".", "index", ".", "values", ".", "max", "(", ")", "+", "1", "new_rows", ".", "index", "=", "np", ".", "arange", "(", "starting_index", ",", "starting_index", "+", "len", "(", "new_rows", ")", ",", "dtype", "=", "np", ".", "int", ")", "logger", ".", "debug", "(", "'finish: update linked table after transition'", ")", "return", "pd", ".", "concat", "(", "[", "table", ",", "new_rows", "]", ")" ]
Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. Returns ------- updated : pandas.DataFrame
[ "Copy", "and", "update", "rows", "in", "a", "table", "that", "has", "a", "column", "referencing", "another", "table", "that", "has", "had", "rows", "added", "via", "copying", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L424-L468
2,980
UDST/urbansim
urbansim/models/transition.py
TransitionModel.transition
def transition(self, data, year, linked_tables=None): """ Add or remove rows from a table based on population targets. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : int Year number that will be passed to `transitioner`. linked_tables : dict of tuple, optional Dictionary of (table, 'column name') pairs. The column name should match the index of `data`. Indexes in `data` that are copied or removed will also be copied and removed in linked tables. They dictionary keys are used in the returned `updated_links`. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Series Indexes of new rows in `updated`. updated_links : dict of pandas.DataFrame """ logger.debug('start: transition') linked_tables = linked_tables or {} updated_links = {} with log_start_finish('add/remove rows', logger): updated, added, copied, removed = self.transitioner(data, year) for table_name, (table, col) in linked_tables.items(): logger.debug('updating linked table {}'.format(table_name)) updated_links[table_name] = \ _update_linked_table(table, col, added, copied, removed) logger.debug('finish: transition') return updated, added, updated_links
python
def transition(self, data, year, linked_tables=None): """ Add or remove rows from a table based on population targets. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : int Year number that will be passed to `transitioner`. linked_tables : dict of tuple, optional Dictionary of (table, 'column name') pairs. The column name should match the index of `data`. Indexes in `data` that are copied or removed will also be copied and removed in linked tables. They dictionary keys are used in the returned `updated_links`. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Series Indexes of new rows in `updated`. updated_links : dict of pandas.DataFrame """ logger.debug('start: transition') linked_tables = linked_tables or {} updated_links = {} with log_start_finish('add/remove rows', logger): updated, added, copied, removed = self.transitioner(data, year) for table_name, (table, col) in linked_tables.items(): logger.debug('updating linked table {}'.format(table_name)) updated_links[table_name] = \ _update_linked_table(table, col, added, copied, removed) logger.debug('finish: transition') return updated, added, updated_links
[ "def", "transition", "(", "self", ",", "data", ",", "year", ",", "linked_tables", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: transition'", ")", "linked_tables", "=", "linked_tables", "or", "{", "}", "updated_links", "=", "{", "}", "with", "log_start_finish", "(", "'add/remove rows'", ",", "logger", ")", ":", "updated", ",", "added", ",", "copied", ",", "removed", "=", "self", ".", "transitioner", "(", "data", ",", "year", ")", "for", "table_name", ",", "(", "table", ",", "col", ")", "in", "linked_tables", ".", "items", "(", ")", ":", "logger", ".", "debug", "(", "'updating linked table {}'", ".", "format", "(", "table_name", ")", ")", "updated_links", "[", "table_name", "]", "=", "_update_linked_table", "(", "table", ",", "col", ",", "added", ",", "copied", ",", "removed", ")", "logger", ".", "debug", "(", "'finish: transition'", ")", "return", "updated", ",", "added", ",", "updated_links" ]
Add or remove rows from a table based on population targets.

Parameters
----------
data : pandas.DataFrame
    Rows will be removed from or added to this table.
year : int
    Year number that will be passed to `transitioner`.
linked_tables : dict of tuple, optional
    Dictionary of (table, 'column name') pairs. The column name should
    match the index of `data`. Indexes in `data` that are copied or removed
    will also be copied and removed in linked tables. The dictionary keys
    are used in the returned `updated_links`.

Returns
-------
updated : pandas.DataFrame
    Table with rows removed or added.
added : pandas.Series
    Indexes of new rows in `updated`.
updated_links : dict of pandas.DataFrame
[ "Add", "or", "remove", "rows", "from", "a", "table", "based", "on", "population", "targets", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L486-L525
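A sketch of running a transition with one linked table, assuming the same module's ``GrowthRateTransition`` takes a growth rate and that ``TransitionModel`` wraps a single transitioner; both tables are invented.

import pandas as pd
from urbansim.models import transition

households = pd.DataFrame(
    {'persons': [1, 2, 4], 'building_id': [1, 1, 2]},
    index=pd.Index([10, 11, 12], name='household_id'))
persons = pd.DataFrame({'household_id': [10, 11, 11, 12, 12]})

# Assumed constructor signatures: GrowthRateTransition(growth_rate) and
# TransitionModel(transitioner).
tm = transition.TransitionModel(transition.GrowthRateTransition(0.05))

updated_hh, added, updated_links = tm.transition(
    households, year=2020,
    linked_tables={'persons': (persons, 'household_id')})
# Households copied into `updated_hh` are mirrored in
# updated_links['persons'], with household_id pointing at the new indexes;
# persons belonging to removed households are dropped as well.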
2,981
UDST/urbansim
urbansim/utils/yamlio.py
series_to_yaml_safe
def series_to_yaml_safe(series, ordered=False): """ Convert a pandas Series to a dict that will survive YAML serialization and re-conversion back to a Series. Parameters ---------- series : pandas.Series ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict """ index = series.index.to_native_types(quoting=True) values = series.values.tolist() if ordered: return OrderedDict( tuple((k, v)) for k, v in zip(index, values)) else: return {i: v for i, v in zip(index, values)}
python
def series_to_yaml_safe(series, ordered=False): """ Convert a pandas Series to a dict that will survive YAML serialization and re-conversion back to a Series. Parameters ---------- series : pandas.Series ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict """ index = series.index.to_native_types(quoting=True) values = series.values.tolist() if ordered: return OrderedDict( tuple((k, v)) for k, v in zip(index, values)) else: return {i: v for i, v in zip(index, values)}
[ "def", "series_to_yaml_safe", "(", "series", ",", "ordered", "=", "False", ")", ":", "index", "=", "series", ".", "index", ".", "to_native_types", "(", "quoting", "=", "True", ")", "values", "=", "series", ".", "values", ".", "tolist", "(", ")", "if", "ordered", ":", "return", "OrderedDict", "(", "tuple", "(", "(", "k", ",", "v", ")", ")", "for", "k", ",", "v", "in", "zip", "(", "index", ",", "values", ")", ")", "else", ":", "return", "{", "i", ":", "v", "for", "i", ",", "v", "in", "zip", "(", "index", ",", "values", ")", "}" ]
Convert a pandas Series to a dict that will survive YAML serialization and re-conversion back to a Series. Parameters ---------- series : pandas.Series ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict
[ "Convert", "a", "pandas", "Series", "to", "a", "dict", "that", "will", "survive", "YAML", "serialization", "and", "re", "-", "conversion", "back", "to", "a", "Series", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L32-L55
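A usage sketch; the coefficient series is invented.

import pandas as pd
from urbansim.utils.yamlio import series_to_yaml_safe

coefs = pd.Series([1.5, -0.2], index=['sqft', 'dist_to_cbd'])

safe = series_to_yaml_safe(coefs)
# A plain dict of native Python values keyed by the stringified index,
# so yaml.dump(safe) round-trips cleanly.
ordered = series_to_yaml_safe(coefs, ordered=True)  # OrderedDict, keeps index order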
2,982
UDST/urbansim
urbansim/utils/yamlio.py
frame_to_yaml_safe
def frame_to_yaml_safe(frame, ordered=False): """ Convert a pandas DataFrame to a dictionary that will survive YAML serialization and re-conversion back to a DataFrame. Parameters ---------- frame : pandas.DataFrame ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict """ if ordered: return OrderedDict(tuple((col, series_to_yaml_safe(series, True)) for col, series in frame.iteritems())) else: return {col: series_to_yaml_safe(series) for col, series in frame.iteritems()}
python
def frame_to_yaml_safe(frame, ordered=False): """ Convert a pandas DataFrame to a dictionary that will survive YAML serialization and re-conversion back to a DataFrame. Parameters ---------- frame : pandas.DataFrame ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict """ if ordered: return OrderedDict(tuple((col, series_to_yaml_safe(series, True)) for col, series in frame.iteritems())) else: return {col: series_to_yaml_safe(series) for col, series in frame.iteritems()}
[ "def", "frame_to_yaml_safe", "(", "frame", ",", "ordered", "=", "False", ")", ":", "if", "ordered", ":", "return", "OrderedDict", "(", "tuple", "(", "(", "col", ",", "series_to_yaml_safe", "(", "series", ",", "True", ")", ")", "for", "col", ",", "series", "in", "frame", ".", "iteritems", "(", ")", ")", ")", "else", ":", "return", "{", "col", ":", "series_to_yaml_safe", "(", "series", ")", "for", "col", ",", "series", "in", "frame", ".", "iteritems", "(", ")", "}" ]
Convert a pandas DataFrame to a dictionary that will survive YAML serialization and re-conversion back to a DataFrame. Parameters ---------- frame : pandas.DataFrame ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict
[ "Convert", "a", "pandas", "DataFrame", "to", "a", "dictionary", "that", "will", "survive", "YAML", "serialization", "and", "re", "-", "conversion", "back", "to", "a", "DataFrame", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L58-L79
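The DataFrame counterpart, shown with an invented coefficients table.

import pandas as pd
import yaml
from urbansim.utils.yamlio import frame_to_yaml_safe

coefficients = pd.DataFrame(
    {'coefficient': [1.2, -0.4], 'std_err': [0.10, 0.05]},
    index=['sqft', 'dist_to_cbd'])

safe = frame_to_yaml_safe(coefficients)
text = yaml.dump(safe, default_flow_style=False)
# Each column becomes a mapping of stringified index -> native value,
# so the frame survives a YAML round trip.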
2,983
UDST/urbansim
urbansim/utils/yamlio.py
ordered_yaml
def ordered_yaml(cfg, order=None): """ Convert a dictionary to a YAML string with preferential ordering for some keys. Converted string is meant to be fairly human readable. Parameters ---------- cfg : dict Dictionary to convert to a YAML string. order: list If provided, overrides the default key ordering. Returns ------- str Nicely formatted YAML string. """ if order is None: order = ['name', 'model_type', 'segmentation_col', 'fit_filters', 'predict_filters', 'choosers_fit_filters', 'choosers_predict_filters', 'alts_fit_filters', 'alts_predict_filters', 'interaction_predict_filters', 'choice_column', 'sample_size', 'estimation_sample_size', 'prediction_sample_size', 'model_expression', 'ytransform', 'min_segment_size', 'default_config', 'models', 'coefficients', 'fitted'] s = [] for key in order: if key not in cfg: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) for key in cfg: if key in order: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) return '\n'.join(s)
python
def ordered_yaml(cfg, order=None): """ Convert a dictionary to a YAML string with preferential ordering for some keys. Converted string is meant to be fairly human readable. Parameters ---------- cfg : dict Dictionary to convert to a YAML string. order: list If provided, overrides the default key ordering. Returns ------- str Nicely formatted YAML string. """ if order is None: order = ['name', 'model_type', 'segmentation_col', 'fit_filters', 'predict_filters', 'choosers_fit_filters', 'choosers_predict_filters', 'alts_fit_filters', 'alts_predict_filters', 'interaction_predict_filters', 'choice_column', 'sample_size', 'estimation_sample_size', 'prediction_sample_size', 'model_expression', 'ytransform', 'min_segment_size', 'default_config', 'models', 'coefficients', 'fitted'] s = [] for key in order: if key not in cfg: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) for key in cfg: if key in order: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) return '\n'.join(s)
[ "def", "ordered_yaml", "(", "cfg", ",", "order", "=", "None", ")", ":", "if", "order", "is", "None", ":", "order", "=", "[", "'name'", ",", "'model_type'", ",", "'segmentation_col'", ",", "'fit_filters'", ",", "'predict_filters'", ",", "'choosers_fit_filters'", ",", "'choosers_predict_filters'", ",", "'alts_fit_filters'", ",", "'alts_predict_filters'", ",", "'interaction_predict_filters'", ",", "'choice_column'", ",", "'sample_size'", ",", "'estimation_sample_size'", ",", "'prediction_sample_size'", ",", "'model_expression'", ",", "'ytransform'", ",", "'min_segment_size'", ",", "'default_config'", ",", "'models'", ",", "'coefficients'", ",", "'fitted'", "]", "s", "=", "[", "]", "for", "key", "in", "order", ":", "if", "key", "not", "in", "cfg", ":", "continue", "s", ".", "append", "(", "yaml", ".", "dump", "(", "{", "key", ":", "cfg", "[", "key", "]", "}", ",", "default_flow_style", "=", "False", ",", "indent", "=", "4", ")", ")", "for", "key", "in", "cfg", ":", "if", "key", "in", "order", ":", "continue", "s", ".", "append", "(", "yaml", ".", "dump", "(", "{", "key", ":", "cfg", "[", "key", "]", "}", ",", "default_flow_style", "=", "False", ",", "indent", "=", "4", ")", ")", "return", "'\\n'", ".", "join", "(", "s", ")" ]
Convert a dictionary to a YAML string with preferential ordering for some keys. Converted string is meant to be fairly human readable. Parameters ---------- cfg : dict Dictionary to convert to a YAML string. order: list If provided, overrides the default key ordering. Returns ------- str Nicely formatted YAML string.
[ "Convert", "a", "dictionary", "to", "a", "YAML", "string", "with", "preferential", "ordering", "for", "some", "keys", ".", "Converted", "string", "is", "meant", "to", "be", "fairly", "human", "readable", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L92-L134
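A usage sketch with an invented model configuration.

from urbansim.utils.yamlio import ordered_yaml

cfg = {
    'fitted': True,
    'name': 'hedonic price model',
    'model_type': 'regression',
    'custom_setting': 42,
}
print(ordered_yaml(cfg))
# 'name' and 'model_type' are emitted first because they appear early in
# the default preferred ordering; keys outside that ordering, such as
# 'custom_setting', are appended afterwards.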
2,984
UDST/urbansim
urbansim/utils/yamlio.py
convert_to_yaml
def convert_to_yaml(cfg, str_or_buffer): """ Convert a dictionary to YAML and return the string or write it out depending on the type of `str_or_buffer`. Parameters ---------- cfg : dict or OrderedDict Dictionary or OrderedDict to convert. str_or_buffer : None, str, or buffer If None: the YAML string will be returned. If string: YAML will be saved to a file. If buffer: YAML will be written to buffer using the ``.write`` method. Returns ------- str or None YAML string if `str_or_buffer` is None, otherwise None since YAML is written out to a separate destination. """ order = None if isinstance(cfg, OrderedDict): order = [] s = ordered_yaml(cfg, order) if not str_or_buffer: return s elif isinstance(str_or_buffer, str): with open(str_or_buffer, 'w') as f: f.write(s) else: str_or_buffer.write(s)
python
def convert_to_yaml(cfg, str_or_buffer): """ Convert a dictionary to YAML and return the string or write it out depending on the type of `str_or_buffer`. Parameters ---------- cfg : dict or OrderedDict Dictionary or OrderedDict to convert. str_or_buffer : None, str, or buffer If None: the YAML string will be returned. If string: YAML will be saved to a file. If buffer: YAML will be written to buffer using the ``.write`` method. Returns ------- str or None YAML string if `str_or_buffer` is None, otherwise None since YAML is written out to a separate destination. """ order = None if isinstance(cfg, OrderedDict): order = [] s = ordered_yaml(cfg, order) if not str_or_buffer: return s elif isinstance(str_or_buffer, str): with open(str_or_buffer, 'w') as f: f.write(s) else: str_or_buffer.write(s)
[ "def", "convert_to_yaml", "(", "cfg", ",", "str_or_buffer", ")", ":", "order", "=", "None", "if", "isinstance", "(", "cfg", ",", "OrderedDict", ")", ":", "order", "=", "[", "]", "s", "=", "ordered_yaml", "(", "cfg", ",", "order", ")", "if", "not", "str_or_buffer", ":", "return", "s", "elif", "isinstance", "(", "str_or_buffer", ",", "str", ")", ":", "with", "open", "(", "str_or_buffer", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "s", ")", "else", ":", "str_or_buffer", ".", "write", "(", "s", ")" ]
Convert a dictionary to YAML and return the string or write it out depending on the type of `str_or_buffer`. Parameters ---------- cfg : dict or OrderedDict Dictionary or OrderedDict to convert. str_or_buffer : None, str, or buffer If None: the YAML string will be returned. If string: YAML will be saved to a file. If buffer: YAML will be written to buffer using the ``.write`` method. Returns ------- str or None YAML string if `str_or_buffer` is None, otherwise None since YAML is written out to a separate destination.
[ "Convert", "a", "dictionary", "to", "YAML", "and", "return", "the", "string", "or", "write", "it", "out", "depending", "on", "the", "type", "of", "str_or_buffer", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L160-L193
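A sketch of both output modes; the config values and file name are illustrative.

from urbansim.utils.yamlio import convert_to_yaml

cfg = {'name': 'elcm', 'model_type': 'segmented_discretechoice', 'sample_size': 50}

text = convert_to_yaml(cfg, None)   # returns the YAML string
convert_to_yaml(cfg, 'elcm.yaml')   # writes the same YAML to a file instead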
2,985
UDST/urbansim
urbansim/accounts.py
Account.add_transaction
def add_transaction(self, amount, subaccount=None, metadata=None): """ Add a new transaction to the account. Parameters ---------- amount : float Negative for withdrawls, positive for deposits. subaccount : object, optional Any indicator of a subaccount to which this transaction applies. metadata : dict, optional Any extra metadata to record with the transaction. (E.g. Info about where the money is coming from or going.) May not contain keys 'amount' or 'subaccount'. """ metadata = metadata or {} self.transactions.append(Transaction(amount, subaccount, metadata)) self.balance += amount
python
def add_transaction(self, amount, subaccount=None, metadata=None): """ Add a new transaction to the account. Parameters ---------- amount : float Negative for withdrawls, positive for deposits. subaccount : object, optional Any indicator of a subaccount to which this transaction applies. metadata : dict, optional Any extra metadata to record with the transaction. (E.g. Info about where the money is coming from or going.) May not contain keys 'amount' or 'subaccount'. """ metadata = metadata or {} self.transactions.append(Transaction(amount, subaccount, metadata)) self.balance += amount
[ "def", "add_transaction", "(", "self", ",", "amount", ",", "subaccount", "=", "None", ",", "metadata", "=", "None", ")", ":", "metadata", "=", "metadata", "or", "{", "}", "self", ".", "transactions", ".", "append", "(", "Transaction", "(", "amount", ",", "subaccount", ",", "metadata", ")", ")", "self", ".", "balance", "+=", "amount" ]
Add a new transaction to the account.

Parameters
----------
amount : float
    Negative for withdrawals, positive for deposits.
subaccount : object, optional
    Any indicator of a subaccount to which this transaction applies.
metadata : dict, optional
    Any extra metadata to record with the transaction.
    (E.g. Info about where the money is coming from or going.)
    May not contain keys 'amount' or 'subaccount'.
[ "Add", "a", "new", "transaction", "to", "the", "account", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L57-L75
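A sketch of recording transactions; the ``Account`` constructor is assumed to take a name and an optional starting balance, and the amounts and metadata are invented.

from urbansim.accounts import Account

# Assumed constructor: Account(name, balance=0).
acct = Account('affordable_housing_fund', balance=1000000)

acct.add_transaction(-250000, subaccount='parcel_101',
                     metadata={'description': 'construction subsidy', 'year': 2020})
acct.add_transaction(75000, subaccount='parcel_101',
                     metadata={'description': 'fee revenue', 'year': 2021})
# Each call appends a Transaction and updates acct.balance in place.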
2,986
UDST/urbansim
urbansim/accounts.py
Account.total_transactions_by_subacct
def total_transactions_by_subacct(self, subaccount): """ Get the sum of all transactions for a given subaccount. Parameters ---------- subaccount : object Identifier of subaccount. Returns ------- total : float """ return sum( t.amount for t in self.transactions if t.subaccount == subaccount)
python
def total_transactions_by_subacct(self, subaccount): """ Get the sum of all transactions for a given subaccount. Parameters ---------- subaccount : object Identifier of subaccount. Returns ------- total : float """ return sum( t.amount for t in self.transactions if t.subaccount == subaccount)
[ "def", "total_transactions_by_subacct", "(", "self", ",", "subaccount", ")", ":", "return", "sum", "(", "t", ".", "amount", "for", "t", "in", "self", ".", "transactions", "if", "t", ".", "subaccount", "==", "subaccount", ")" ]
Get the sum of all transactions for a given subaccount. Parameters ---------- subaccount : object Identifier of subaccount. Returns ------- total : float
[ "Get", "the", "sum", "of", "all", "transactions", "for", "a", "given", "subaccount", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L102-L117
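Continuing the account sketch above, the per-subaccount net is a single call.

spent = acct.total_transactions_by_subacct('parcel_101')
# -250000 + 75000 == -175000: the net of every transaction tagged with
# that subaccount identifier.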
2,987
UDST/urbansim
urbansim/accounts.py
Account.to_frame
def to_frame(self): """ Return transactions as a pandas DataFrame. """ col_names = _column_names_from_metadata( t.metadata for t in self.transactions) def trow(t): return tz.concatv( (t.amount, t.subaccount), (t.metadata.get(c) for c in col_names)) rows = [trow(t) for t in self.transactions] if len(rows) == 0: return pd.DataFrame(columns=COLS + col_names) return pd.DataFrame(rows, columns=COLS + col_names)
python
def to_frame(self): """ Return transactions as a pandas DataFrame. """ col_names = _column_names_from_metadata( t.metadata for t in self.transactions) def trow(t): return tz.concatv( (t.amount, t.subaccount), (t.metadata.get(c) for c in col_names)) rows = [trow(t) for t in self.transactions] if len(rows) == 0: return pd.DataFrame(columns=COLS + col_names) return pd.DataFrame(rows, columns=COLS + col_names)
[ "def", "to_frame", "(", "self", ")", ":", "col_names", "=", "_column_names_from_metadata", "(", "t", ".", "metadata", "for", "t", "in", "self", ".", "transactions", ")", "def", "trow", "(", "t", ")", ":", "return", "tz", ".", "concatv", "(", "(", "t", ".", "amount", ",", "t", ".", "subaccount", ")", ",", "(", "t", ".", "metadata", ".", "get", "(", "c", ")", "for", "c", "in", "col_names", ")", ")", "rows", "=", "[", "trow", "(", "t", ")", "for", "t", "in", "self", ".", "transactions", "]", "if", "len", "(", "rows", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", "columns", "=", "COLS", "+", "col_names", ")", "return", "pd", ".", "DataFrame", "(", "rows", ",", "columns", "=", "COLS", "+", "col_names", ")" ]
Return transactions as a pandas DataFrame.
[ "Return", "transactions", "as", "a", "pandas", "DataFrame", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L136-L153
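Still continuing the account sketch, the full ledger can be dumped to a DataFrame.

ledger = acct.to_frame()
# One row per transaction: the amount and subaccount plus a column for
# every metadata key that has appeared ('description' and 'year' here).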
2,988
UDST/urbansim
urbansim/models/util.py
apply_filter_query
def apply_filter_query(df, filters=None): """ Use the DataFrame.query method to filter a table down to the desired rows. Parameters ---------- df : pandas.DataFrame filters : list of str or str, optional List of filters to apply. Will be joined together with ' and ' and passed to DataFrame.query. A string will be passed straight to DataFrame.query. If not supplied no filtering will be done. Returns ------- filtered_df : pandas.DataFrame """ with log_start_finish('apply filter query: {!r}'.format(filters), logger): if filters: if isinstance(filters, str): query = filters else: query = ' and '.join(filters) return df.query(query) else: return df
python
def apply_filter_query(df, filters=None): """ Use the DataFrame.query method to filter a table down to the desired rows. Parameters ---------- df : pandas.DataFrame filters : list of str or str, optional List of filters to apply. Will be joined together with ' and ' and passed to DataFrame.query. A string will be passed straight to DataFrame.query. If not supplied no filtering will be done. Returns ------- filtered_df : pandas.DataFrame """ with log_start_finish('apply filter query: {!r}'.format(filters), logger): if filters: if isinstance(filters, str): query = filters else: query = ' and '.join(filters) return df.query(query) else: return df
[ "def", "apply_filter_query", "(", "df", ",", "filters", "=", "None", ")", ":", "with", "log_start_finish", "(", "'apply filter query: {!r}'", ".", "format", "(", "filters", ")", ",", "logger", ")", ":", "if", "filters", ":", "if", "isinstance", "(", "filters", ",", "str", ")", ":", "query", "=", "filters", "else", ":", "query", "=", "' and '", ".", "join", "(", "filters", ")", "return", "df", ".", "query", "(", "query", ")", "else", ":", "return", "df" ]
Use the DataFrame.query method to filter a table down to the desired rows. Parameters ---------- df : pandas.DataFrame filters : list of str or str, optional List of filters to apply. Will be joined together with ' and ' and passed to DataFrame.query. A string will be passed straight to DataFrame.query. If not supplied no filtering will be done. Returns ------- filtered_df : pandas.DataFrame
[ "Use", "the", "DataFrame", ".", "query", "method", "to", "filter", "a", "table", "down", "to", "the", "desired", "rows", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L24-L51
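A usage sketch on an invented buildings table.

import pandas as pd
from urbansim.models.util import apply_filter_query

buildings = pd.DataFrame({
    'residential_units': [0, 10, 250],
    'year_built': [1985, 2001, 2015],
})

apply_filter_query(buildings, ['residential_units > 0', 'year_built > 2000'])
# same as buildings.query('residential_units > 0 and year_built > 2000')
apply_filter_query(buildings)  # no filters -> the frame is returned unchanged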
2,989
UDST/urbansim
urbansim/models/util.py
_filterize
def _filterize(name, value): """ Turn a `name` and `value` into a string expression compatible the ``DataFrame.query`` method. Parameters ---------- name : str Should be the name of a column in the table to which the filter will be applied. A suffix of '_max' will result in a "less than" filter, a suffix of '_min' will result in a "greater than or equal to" filter, and no recognized suffix will result in an "equal to" filter. value : any Value side of filter for comparison to column values. Returns ------- filter_exp : str """ if name.endswith('_min'): name = name[:-4] comp = '>=' elif name.endswith('_max'): name = name[:-4] comp = '<' else: comp = '==' result = '{} {} {!r}'.format(name, comp, value) logger.debug( 'converted name={} and value={} to filter {}'.format( name, value, result)) return result
python
def _filterize(name, value): """ Turn a `name` and `value` into a string expression compatible the ``DataFrame.query`` method. Parameters ---------- name : str Should be the name of a column in the table to which the filter will be applied. A suffix of '_max' will result in a "less than" filter, a suffix of '_min' will result in a "greater than or equal to" filter, and no recognized suffix will result in an "equal to" filter. value : any Value side of filter for comparison to column values. Returns ------- filter_exp : str """ if name.endswith('_min'): name = name[:-4] comp = '>=' elif name.endswith('_max'): name = name[:-4] comp = '<' else: comp = '==' result = '{} {} {!r}'.format(name, comp, value) logger.debug( 'converted name={} and value={} to filter {}'.format( name, value, result)) return result
[ "def", "_filterize", "(", "name", ",", "value", ")", ":", "if", "name", ".", "endswith", "(", "'_min'", ")", ":", "name", "=", "name", "[", ":", "-", "4", "]", "comp", "=", "'>='", "elif", "name", ".", "endswith", "(", "'_max'", ")", ":", "name", "=", "name", "[", ":", "-", "4", "]", "comp", "=", "'<'", "else", ":", "comp", "=", "'=='", "result", "=", "'{} {} {!r}'", ".", "format", "(", "name", ",", "comp", ",", "value", ")", "logger", ".", "debug", "(", "'converted name={} and value={} to filter {}'", ".", "format", "(", "name", ",", "value", ",", "result", ")", ")", "return", "result" ]
Turn a `name` and `value` into a string expression compatible with the
``DataFrame.query`` method.

Parameters
----------
name : str
    Should be the name of a column in the table to which the filter
    will be applied.

    A suffix of '_max' will result in a "less than" filter,
    a suffix of '_min' will result in a "greater than or equal to" filter,
    and no recognized suffix will result in an "equal to" filter.
value : any
    Value side of filter for comparison to column values.

Returns
-------
filter_exp : str
[ "Turn", "a", "name", "and", "value", "into", "a", "string", "expression", "compatible", "the", "DataFrame", ".", "query", "method", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L54-L89
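Expected translations, shown as comments; the column names are illustrative.

from urbansim.models.util import _filterize

_filterize('residential_units_min', 1)   # -> "residential_units >= 1"
_filterize('year_built_max', 2000)       # -> "year_built < 2000"
_filterize('county', 'Alameda')          # -> "county == 'Alameda'"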
2,990
UDST/urbansim
urbansim/models/util.py
str_model_expression
def str_model_expression(expr, add_constant=True): """ We support specifying model expressions as strings, lists, or dicts; but for use with patsy and statsmodels we need a string. This function will take any of those as input and return a string. Parameters ---------- expr : str, iterable, or dict A string will be returned unmodified except to add or remove a constant. An iterable sequence will be joined together with ' + '. A dictionary should have ``right_side`` and, optionally, ``left_side`` keys. The ``right_side`` can be a list or a string and will be handled as above. If ``left_side`` is present it will be joined with ``right_side`` with ' ~ '. add_constant : bool, optional Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model. If the expression already has a '+ 1' or '- 1' this option will be ignored. Returns ------- model_expression : str A string model expression suitable for use with statsmodels and patsy. """ if not isinstance(expr, str): if isinstance(expr, collections.Mapping): left_side = expr.get('left_side') right_side = str_model_expression(expr['right_side'], add_constant) else: # some kind of iterable like a list left_side = None right_side = ' + '.join(expr) if left_side: model_expression = ' ~ '.join((left_side, right_side)) else: model_expression = right_side else: model_expression = expr if not has_constant_expr(model_expression): if add_constant: model_expression += ' + 1' else: model_expression += ' - 1' logger.debug( 'converted expression: {!r} to model: {!r}'.format( expr, model_expression)) return model_expression
python
def str_model_expression(expr, add_constant=True): """ We support specifying model expressions as strings, lists, or dicts; but for use with patsy and statsmodels we need a string. This function will take any of those as input and return a string. Parameters ---------- expr : str, iterable, or dict A string will be returned unmodified except to add or remove a constant. An iterable sequence will be joined together with ' + '. A dictionary should have ``right_side`` and, optionally, ``left_side`` keys. The ``right_side`` can be a list or a string and will be handled as above. If ``left_side`` is present it will be joined with ``right_side`` with ' ~ '. add_constant : bool, optional Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model. If the expression already has a '+ 1' or '- 1' this option will be ignored. Returns ------- model_expression : str A string model expression suitable for use with statsmodels and patsy. """ if not isinstance(expr, str): if isinstance(expr, collections.Mapping): left_side = expr.get('left_side') right_side = str_model_expression(expr['right_side'], add_constant) else: # some kind of iterable like a list left_side = None right_side = ' + '.join(expr) if left_side: model_expression = ' ~ '.join((left_side, right_side)) else: model_expression = right_side else: model_expression = expr if not has_constant_expr(model_expression): if add_constant: model_expression += ' + 1' else: model_expression += ' - 1' logger.debug( 'converted expression: {!r} to model: {!r}'.format( expr, model_expression)) return model_expression
[ "def", "str_model_expression", "(", "expr", ",", "add_constant", "=", "True", ")", ":", "if", "not", "isinstance", "(", "expr", ",", "str", ")", ":", "if", "isinstance", "(", "expr", ",", "collections", ".", "Mapping", ")", ":", "left_side", "=", "expr", ".", "get", "(", "'left_side'", ")", "right_side", "=", "str_model_expression", "(", "expr", "[", "'right_side'", "]", ",", "add_constant", ")", "else", ":", "# some kind of iterable like a list", "left_side", "=", "None", "right_side", "=", "' + '", ".", "join", "(", "expr", ")", "if", "left_side", ":", "model_expression", "=", "' ~ '", ".", "join", "(", "(", "left_side", ",", "right_side", ")", ")", "else", ":", "model_expression", "=", "right_side", "else", ":", "model_expression", "=", "expr", "if", "not", "has_constant_expr", "(", "model_expression", ")", ":", "if", "add_constant", ":", "model_expression", "+=", "' + 1'", "else", ":", "model_expression", "+=", "' - 1'", "logger", ".", "debug", "(", "'converted expression: {!r} to model: {!r}'", ".", "format", "(", "expr", ",", "model_expression", ")", ")", "return", "model_expression" ]
We support specifying model expressions as strings, lists, or dicts; but for use with patsy and statsmodels we need a string. This function will take any of those as input and return a string. Parameters ---------- expr : str, iterable, or dict A string will be returned unmodified except to add or remove a constant. An iterable sequence will be joined together with ' + '. A dictionary should have ``right_side`` and, optionally, ``left_side`` keys. The ``right_side`` can be a list or a string and will be handled as above. If ``left_side`` is present it will be joined with ``right_side`` with ' ~ '. add_constant : bool, optional Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model. If the expression already has a '+ 1' or '- 1' this option will be ignored. Returns ------- model_expression : str A string model expression suitable for use with statsmodels and patsy.
[ "We", "support", "specifying", "model", "expressions", "as", "strings", "lists", "or", "dicts", ";", "but", "for", "use", "with", "patsy", "and", "statsmodels", "we", "need", "a", "string", ".", "This", "function", "will", "take", "any", "of", "those", "as", "input", "and", "return", "a", "string", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L174-L227
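Expected conversions for the three accepted input types, shown as comments; the variable names are illustrative.

from urbansim.models.util import str_model_expression

str_model_expression('sqft + dist_to_cbd')
# -> 'sqft + dist_to_cbd + 1'
str_model_expression(['sqft', 'dist_to_cbd'], add_constant=False)
# -> 'sqft + dist_to_cbd - 1'
str_model_expression({'left_side': 'np.log(price)',
                      'right_side': ['sqft', 'dist_to_cbd']})
# -> 'np.log(price) ~ sqft + dist_to_cbd + 1'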
2,991
UDST/urbansim
urbansim/models/util.py
sorted_groupby
def sorted_groupby(df, groupby): """ Perform a groupby on a DataFrame using a specific column and assuming that that column is sorted. Parameters ---------- df : pandas.DataFrame groupby : object Column name on which to groupby. This column must be sorted. Returns ------- generator Yields pairs of group_name, DataFrame. """ start = 0 prev = df[groupby].iloc[start] for i, x in enumerate(df[groupby]): if x != prev: yield prev, df.iloc[start:i] prev = x start = i # need to send back the last group yield prev, df.iloc[start:]
python
def sorted_groupby(df, groupby): """ Perform a groupby on a DataFrame using a specific column and assuming that that column is sorted. Parameters ---------- df : pandas.DataFrame groupby : object Column name on which to groupby. This column must be sorted. Returns ------- generator Yields pairs of group_name, DataFrame. """ start = 0 prev = df[groupby].iloc[start] for i, x in enumerate(df[groupby]): if x != prev: yield prev, df.iloc[start:i] prev = x start = i # need to send back the last group yield prev, df.iloc[start:]
[ "def", "sorted_groupby", "(", "df", ",", "groupby", ")", ":", "start", "=", "0", "prev", "=", "df", "[", "groupby", "]", ".", "iloc", "[", "start", "]", "for", "i", ",", "x", "in", "enumerate", "(", "df", "[", "groupby", "]", ")", ":", "if", "x", "!=", "prev", ":", "yield", "prev", ",", "df", ".", "iloc", "[", "start", ":", "i", "]", "prev", "=", "x", "start", "=", "i", "# need to send back the last group", "yield", "prev", ",", "df", ".", "iloc", "[", "start", ":", "]" ]
Perform a groupby on a DataFrame using a specific column and assuming that that column is sorted. Parameters ---------- df : pandas.DataFrame groupby : object Column name on which to groupby. This column must be sorted. Returns ------- generator Yields pairs of group_name, DataFrame.
[ "Perform", "a", "groupby", "on", "a", "DataFrame", "using", "a", "specific", "column", "and", "assuming", "that", "that", "column", "is", "sorted", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L230-L255
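A usage sketch; note the frame must already be sorted on the groupby column.

import pandas as pd
from urbansim.models.util import sorted_groupby

df = pd.DataFrame({'zone_id': [1, 1, 2, 2, 2, 5],
                   'sqft': [900, 1200, 800, 1100, 950, 2000]})

for zone, group in sorted_groupby(df, 'zone_id'):
    print(zone, len(group))
# 1 2
# 2 3
# 5 1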
2,992
UDST/urbansim
urbansim/models/util.py
columns_in_filters
def columns_in_filters(filters): """ Returns a list of the columns used in a set of query filters. Parameters ---------- filters : list of str or str List of the filters as passed passed to ``apply_filter_query``. Returns ------- columns : list of str List of all the strings mentioned in the filters. """ if not filters: return [] if not isinstance(filters, str): filters = ' '.join(filters) columns = [] reserved = {'and', 'or', 'in', 'not'} for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline): if toknum == NAME and tokval not in reserved: columns.append(tokval) return list(tz.unique(columns))
python
def columns_in_filters(filters): """ Returns a list of the columns used in a set of query filters. Parameters ---------- filters : list of str or str List of the filters as passed passed to ``apply_filter_query``. Returns ------- columns : list of str List of all the strings mentioned in the filters. """ if not filters: return [] if not isinstance(filters, str): filters = ' '.join(filters) columns = [] reserved = {'and', 'or', 'in', 'not'} for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline): if toknum == NAME and tokval not in reserved: columns.append(tokval) return list(tz.unique(columns))
[ "def", "columns_in_filters", "(", "filters", ")", ":", "if", "not", "filters", ":", "return", "[", "]", "if", "not", "isinstance", "(", "filters", ",", "str", ")", ":", "filters", "=", "' '", ".", "join", "(", "filters", ")", "columns", "=", "[", "]", "reserved", "=", "{", "'and'", ",", "'or'", ",", "'in'", ",", "'not'", "}", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "generate_tokens", "(", "StringIO", "(", "filters", ")", ".", "readline", ")", ":", "if", "toknum", "==", "NAME", "and", "tokval", "not", "in", "reserved", ":", "columns", ".", "append", "(", "tokval", ")", "return", "list", "(", "tz", ".", "unique", "(", "columns", ")", ")" ]
Returns a list of the columns used in a set of query filters.

Parameters
----------
filters : list of str or str
    List of the filters as passed to ``apply_filter_query``.

Returns
-------
columns : list of str
    List of all the strings mentioned in the filters.
[ "Returns", "a", "list", "of", "the", "columns", "used", "in", "a", "set", "of", "query", "filters", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L258-L286
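A small, hedged example of columns_in_filters; the filter strings are made up, and the import path is assumed from the path field above.

from urbansim.models.util import columns_in_filters  # assumed import path

filters = ['building_type_id == 1', 'zone_id in [1, 2, 3]']

print(columns_in_filters(filters))
# expected: ['building_type_id', 'zone_id'] -- 'in' is in the reserved set
# and numeric literals are NUMBER tokens rather than NAME tokens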
2,993
UDST/urbansim
urbansim/models/util.py
_tokens_from_patsy
def _tokens_from_patsy(node):
    """
    Yields all the individual tokens from within a patsy formula
    as parsed by patsy.parse_formula.parse_formula.

    Parameters
    ----------
    node : patsy.parse_formula.ParseNode

    """
    for n in node.args:
        for t in _tokens_from_patsy(n):
            yield t

    if node.token:
        yield node.token
python
def _tokens_from_patsy(node):
    """
    Yields all the individual tokens from within a patsy formula
    as parsed by patsy.parse_formula.parse_formula.

    Parameters
    ----------
    node : patsy.parse_formula.ParseNode

    """
    for n in node.args:
        for t in _tokens_from_patsy(n):
            yield t

    if node.token:
        yield node.token
[ "def", "_tokens_from_patsy", "(", "node", ")", ":", "for", "n", "in", "node", ".", "args", ":", "for", "t", "in", "_tokens_from_patsy", "(", "n", ")", ":", "yield", "t", "if", "node", ".", "token", ":", "yield", "node", ".", "token" ]
Yields all the individual tokens from within a patsy formula
as parsed by patsy.parse_formula.parse_formula.

Parameters
----------
node : patsy.parse_formula.ParseNode
[ "Yields", "all", "the", "individual", "tokens", "from", "within", "a", "patsy", "formula", "as", "parsed", "by", "patsy", ".", "parse_formula", ".", "parse_formula", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L289-L304
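For illustration only (the function is private): a sketch of walking a patsy parse tree with _tokens_from_patsy. The formula is hypothetical and the import path is an assumption.

import patsy.parse_formula
from urbansim.models.util import _tokens_from_patsy  # private helper, shown for illustration

tree = patsy.parse_formula.parse_formula('price ~ sqft + dist_to_cbd')
for tok in _tokens_from_patsy(tree):
    # yields every token in the tree, children before their parent operators
    print(tok)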
2,994
UDST/urbansim
urbansim/models/util.py
columns_in_formula
def columns_in_formula(formula):
    """
    Returns the names of all the columns used in a patsy formula.

    Parameters
    ----------
    formula : str, iterable, or dict
        Any formula construction supported by ``str_model_expression``.

    Returns
    -------
    columns : list of str

    """
    if formula is None:
        return []

    formula = str_model_expression(formula, add_constant=False)
    columns = []

    tokens = map(
        lambda x: x.extra,
        tz.remove(
            lambda x: x.extra is None,
            _tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))

    for tok in tokens:
        # if there are parentheses in the expression we
        # want to drop them and everything outside
        # and start again from the top
        if '(' in tok:
            start = tok.find('(') + 1
            fin = tok.rfind(')')
            columns.extend(columns_in_formula(tok[start:fin]))
        else:
            for toknum, tokval, _, _, _ in generate_tokens(
                    StringIO(tok).readline):
                if toknum == NAME:
                    columns.append(tokval)

    return list(tz.unique(columns))
python
def columns_in_formula(formula):
    """
    Returns the names of all the columns used in a patsy formula.

    Parameters
    ----------
    formula : str, iterable, or dict
        Any formula construction supported by ``str_model_expression``.

    Returns
    -------
    columns : list of str

    """
    if formula is None:
        return []

    formula = str_model_expression(formula, add_constant=False)
    columns = []

    tokens = map(
        lambda x: x.extra,
        tz.remove(
            lambda x: x.extra is None,
            _tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))

    for tok in tokens:
        # if there are parentheses in the expression we
        # want to drop them and everything outside
        # and start again from the top
        if '(' in tok:
            start = tok.find('(') + 1
            fin = tok.rfind(')')
            columns.extend(columns_in_formula(tok[start:fin]))
        else:
            for toknum, tokval, _, _, _ in generate_tokens(
                    StringIO(tok).readline):
                if toknum == NAME:
                    columns.append(tokval)

    return list(tz.unique(columns))
[ "def", "columns_in_formula", "(", "formula", ")", ":", "if", "formula", "is", "None", ":", "return", "[", "]", "formula", "=", "str_model_expression", "(", "formula", ",", "add_constant", "=", "False", ")", "columns", "=", "[", "]", "tokens", "=", "map", "(", "lambda", "x", ":", "x", ".", "extra", ",", "tz", ".", "remove", "(", "lambda", "x", ":", "x", ".", "extra", "is", "None", ",", "_tokens_from_patsy", "(", "patsy", ".", "parse_formula", ".", "parse_formula", "(", "formula", ")", ")", ")", ")", "for", "tok", "in", "tokens", ":", "# if there are parentheses in the expression we", "# want to drop them and everything outside", "# and start again from the top", "if", "'('", "in", "tok", ":", "start", "=", "tok", ".", "find", "(", "'('", ")", "+", "1", "fin", "=", "tok", ".", "rfind", "(", "')'", ")", "columns", ".", "extend", "(", "columns_in_formula", "(", "tok", "[", "start", ":", "fin", "]", ")", ")", "else", ":", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "generate_tokens", "(", "StringIO", "(", "tok", ")", ".", "readline", ")", ":", "if", "toknum", "==", "NAME", ":", "columns", ".", "append", "(", "tokval", ")", "return", "list", "(", "tz", ".", "unique", "(", "columns", ")", ")" ]
Returns the names of all the columns used in a patsy formula.

Parameters
----------
formula : str, iterable, or dict
    Any formula construction supported by ``str_model_expression``.

Returns
-------
columns : list of str
[ "Returns", "the", "names", "of", "all", "the", "columns", "used", "in", "a", "patsy", "formula", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L307-L347
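A hedged usage sketch for columns_in_formula; the formula and import path are assumptions, and the printed result is what the parenthesis-handling logic above suggests rather than a verified output.

from urbansim.models.util import columns_in_formula  # assumed import path

print(columns_in_formula('np.log1p(price) ~ sqft + year_built'))
# expected: ['price', 'sqft', 'year_built'] -- the recursion into parentheses
# recovers 'price' while dropping the np.log1p call itself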
2,995
UDST/urbansim
urbansim/models/regression.py
fit_model
def fit_model(df, filters, model_expression):
    """
    Use statsmodels OLS to construct a model relation.

    Parameters
    ----------
    df : pandas.DataFrame
        Data to use for fit. Should contain all the columns
        referenced in the `model_expression`.
    filters : list of str
        Any filters to apply before doing the model fit.
    model_expression : str
        A patsy model expression that can be used with statsmodels.
        Should contain both the left- and right-hand sides.

    Returns
    -------
    fit : statsmodels.regression.linear_model.OLSResults

    """
    df = util.apply_filter_query(df, filters)
    model = smf.ols(formula=model_expression, data=df)

    if len(model.exog) != len(df):
        raise ModelEvaluationError(
            'Estimated data does not have the same length as input. '
            'This suggests there are null values in one or more of '
            'the input columns.')

    with log_start_finish('statsmodels OLS fit', logger):
        return model.fit()
python
def fit_model(df, filters, model_expression):
    """
    Use statsmodels OLS to construct a model relation.

    Parameters
    ----------
    df : pandas.DataFrame
        Data to use for fit. Should contain all the columns
        referenced in the `model_expression`.
    filters : list of str
        Any filters to apply before doing the model fit.
    model_expression : str
        A patsy model expression that can be used with statsmodels.
        Should contain both the left- and right-hand sides.

    Returns
    -------
    fit : statsmodels.regression.linear_model.OLSResults

    """
    df = util.apply_filter_query(df, filters)
    model = smf.ols(formula=model_expression, data=df)

    if len(model.exog) != len(df):
        raise ModelEvaluationError(
            'Estimated data does not have the same length as input. '
            'This suggests there are null values in one or more of '
            'the input columns.')

    with log_start_finish('statsmodels OLS fit', logger):
        return model.fit()
[ "def", "fit_model", "(", "df", ",", "filters", ",", "model_expression", ")", ":", "df", "=", "util", ".", "apply_filter_query", "(", "df", ",", "filters", ")", "model", "=", "smf", ".", "ols", "(", "formula", "=", "model_expression", ",", "data", "=", "df", ")", "if", "len", "(", "model", ".", "exog", ")", "!=", "len", "(", "df", ")", ":", "raise", "ModelEvaluationError", "(", "'Estimated data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "with", "log_start_finish", "(", "'statsmodels OLS fit'", ",", "logger", ")", ":", "return", "model", ".", "fit", "(", ")" ]
Use statsmodels OLS to construct a model relation.

Parameters
----------
df : pandas.DataFrame
    Data to use for fit. Should contain all the columns
    referenced in the `model_expression`.
filters : list of str
    Any filters to apply before doing the model fit.
model_expression : str
    A patsy model expression that can be used with statsmodels.
    Should contain both the left- and right-hand sides.

Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
[ "Use", "statsmodels", "OLS", "to", "construct", "a", "model", "relation", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L25-L55
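A minimal fit sketch using fit_model; the data are synthetic and the import path is assumed from the path field above.

import numpy as np
import pandas as pd
from urbansim.models.regression import fit_model  # assumed import path

# toy hedonic data: price is roughly linear in square footage
df = pd.DataFrame({'sqft': np.linspace(500, 2500, 50)})
df['price'] = 100000 + 150 * df['sqft'] + np.random.normal(scale=5000, size=50)

fit = fit_model(df, ['sqft > 0'], 'price ~ sqft')
print(fit.params)    # Intercept and sqft coefficients
print(fit.rsquared)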
2,996
UDST/urbansim
urbansim/models/regression.py
predict
def predict(df, filters, model_fit, ytransform=None):
    """
    Apply model to new data to predict new dependent values.

    Parameters
    ----------
    df : pandas.DataFrame
    filters : list of str
        Any filters to apply before doing prediction.
    model_fit : statsmodels.regression.linear_model.OLSResults
        Result of model estimation.
    ytransform : callable, optional
        A function to call on the array of predicted output.
        For example, if the model relation is predicting the log
        of price, you might pass ``ytransform=np.exp`` so that
        the results reflect actual price.

        By default no transformation is applied.

    Returns
    -------
    result : pandas.Series
        Predicted values as a pandas Series. Will have the index of `df`
        after applying filters.

    """
    df = util.apply_filter_query(df, filters)

    with log_start_finish('statsmodels predict', logger):
        sim_data = model_fit.predict(df)

    if len(sim_data) != len(df):
        raise ModelEvaluationError(
            'Predicted data does not have the same length as input. '
            'This suggests there are null values in one or more of '
            'the input columns.')

    if ytransform:
        sim_data = ytransform(sim_data)

    return pd.Series(sim_data, index=df.index)
python
def predict(df, filters, model_fit, ytransform=None):
    """
    Apply model to new data to predict new dependent values.

    Parameters
    ----------
    df : pandas.DataFrame
    filters : list of str
        Any filters to apply before doing prediction.
    model_fit : statsmodels.regression.linear_model.OLSResults
        Result of model estimation.
    ytransform : callable, optional
        A function to call on the array of predicted output.
        For example, if the model relation is predicting the log
        of price, you might pass ``ytransform=np.exp`` so that
        the results reflect actual price.

        By default no transformation is applied.

    Returns
    -------
    result : pandas.Series
        Predicted values as a pandas Series. Will have the index of `df`
        after applying filters.

    """
    df = util.apply_filter_query(df, filters)

    with log_start_finish('statsmodels predict', logger):
        sim_data = model_fit.predict(df)

    if len(sim_data) != len(df):
        raise ModelEvaluationError(
            'Predicted data does not have the same length as input. '
            'This suggests there are null values in one or more of '
            'the input columns.')

    if ytransform:
        sim_data = ytransform(sim_data)

    return pd.Series(sim_data, index=df.index)
[ "def", "predict", "(", "df", ",", "filters", ",", "model_fit", ",", "ytransform", "=", "None", ")", ":", "df", "=", "util", ".", "apply_filter_query", "(", "df", ",", "filters", ")", "with", "log_start_finish", "(", "'statsmodels predict'", ",", "logger", ")", ":", "sim_data", "=", "model_fit", ".", "predict", "(", "df", ")", "if", "len", "(", "sim_data", ")", "!=", "len", "(", "df", ")", ":", "raise", "ModelEvaluationError", "(", "'Predicted data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "if", "ytransform", ":", "sim_data", "=", "ytransform", "(", "sim_data", ")", "return", "pd", ".", "Series", "(", "sim_data", ",", "index", "=", "df", ".", "index", ")" ]
Apply model to new data to predict new dependent values.

Parameters
----------
df : pandas.DataFrame
filters : list of str
    Any filters to apply before doing prediction.
model_fit : statsmodels.regression.linear_model.OLSResults
    Result of model estimation.
ytransform : callable, optional
    A function to call on the array of predicted output.
    For example, if the model relation is predicting the log
    of price, you might pass ``ytransform=np.exp`` so that
    the results reflect actual price.

    By default no transformation is applied.

Returns
-------
result : pandas.Series
    Predicted values as a pandas Series. Will have the index of `df`
    after applying filters.
[ "Apply", "model", "to", "new", "data", "to", "predict", "new", "dependent", "values", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L58-L97
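A hedged end-to-end sketch combining fit_model and predict, including a ytransform to undo a log on the dependent variable; the data and import paths are assumptions.

import numpy as np
import pandas as pd
from urbansim.models.regression import fit_model, predict  # assumed import paths

# estimate on a log-price relation
train = pd.DataFrame({'sqft': np.linspace(500, 2500, 50)})
train['log_price'] = 11.0 + 0.0005 * train['sqft']
fit = fit_model(train, ['sqft > 0'], 'log_price ~ sqft')

# predict for new observations, exponentiating back to price units
new_homes = pd.DataFrame({'sqft': [800.0, 1600.0]}, index=[101, 102])
prices = predict(new_homes, ['sqft > 0'], fit, ytransform=np.exp)
print(prices)  # pandas Series indexed 101, 102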
2,997
UDST/urbansim
urbansim/models/regression.py
_model_fit_to_table
def _model_fit_to_table(fit):
    """
    Produce a pandas DataFrame of model fit results from a statsmodels
    fit result object.

    Parameters
    ----------
    fit : statsmodels.regression.linear_model.RegressionResults

    Returns
    -------
    fit_parameters : pandas.DataFrame
        Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
        Index will be model terms.

        This frame will also have non-standard attributes
        .rsquared and .rsquared_adj with the same meaning and value
        as on `fit`.

    """
    fit_parameters = pd.DataFrame(
        {'Coefficient': fit.params,
         'Std. Error': fit.bse,
         'T-Score': fit.tvalues})

    fit_parameters.rsquared = fit.rsquared
    fit_parameters.rsquared_adj = fit.rsquared_adj
    return fit_parameters
python
def _model_fit_to_table(fit):
    """
    Produce a pandas DataFrame of model fit results from a statsmodels
    fit result object.

    Parameters
    ----------
    fit : statsmodels.regression.linear_model.RegressionResults

    Returns
    -------
    fit_parameters : pandas.DataFrame
        Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
        Index will be model terms.

        This frame will also have non-standard attributes
        .rsquared and .rsquared_adj with the same meaning and value
        as on `fit`.

    """
    fit_parameters = pd.DataFrame(
        {'Coefficient': fit.params,
         'Std. Error': fit.bse,
         'T-Score': fit.tvalues})

    fit_parameters.rsquared = fit.rsquared
    fit_parameters.rsquared_adj = fit.rsquared_adj
    return fit_parameters
[ "def", "_model_fit_to_table", "(", "fit", ")", ":", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "{", "'Coefficient'", ":", "fit", ".", "params", ",", "'Std. Error'", ":", "fit", ".", "bse", ",", "'T-Score'", ":", "fit", ".", "tvalues", "}", ")", "fit_parameters", ".", "rsquared", "=", "fit", ".", "rsquared", "fit_parameters", ".", "rsquared_adj", "=", "fit", ".", "rsquared_adj", "return", "fit_parameters" ]
Produce a pandas DataFrame of model fit results from a statsmodels
fit result object.

Parameters
----------
fit : statsmodels.regression.linear_model.RegressionResults

Returns
-------
fit_parameters : pandas.DataFrame
    Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
    Index will be model terms.

    This frame will also have non-standard attributes
    .rsquared and .rsquared_adj with the same meaning and value
    as on `fit`.
[ "Produce", "a", "pandas", "DataFrame", "of", "model", "fit", "results", "from", "a", "statsmodels", "fit", "result", "object", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L178-L204
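For illustration only (the helper is private): a sketch that builds a statsmodels fit directly and summarizes it with _model_fit_to_table; the toy data are made up.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from urbansim.models.regression import _model_fit_to_table  # private helper, illustration only

df = pd.DataFrame({'x': np.arange(20.0)})
df['y'] = 2.0 * df['x'] + np.random.normal(scale=0.1, size=20)

table = _model_fit_to_table(smf.ols('y ~ x', data=df).fit())
print(table)                                # Coefficient / Std. Error / T-Score per term
print(table.rsquared, table.rsquared_adj)   # non-standard attributes carried along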
2,998
UDST/urbansim
urbansim/models/regression.py
_FakeRegressionResults.predict
def predict(self, data):
    """
    Predict new values by running data through the fit model.

    Parameters
    ----------
    data : pandas.DataFrame
        Table with columns corresponding to the RHS of `model_expression`.

    Returns
    -------
    predicted : ndarray
        Array of predicted values.

    """
    with log_start_finish('_FakeRegressionResults prediction', logger):
        model_design = dmatrix(
            self._rhs, data=data, return_type='dataframe')
        return model_design.dot(self.params).values
python
def predict(self, data):
    """
    Predict new values by running data through the fit model.

    Parameters
    ----------
    data : pandas.DataFrame
        Table with columns corresponding to the RHS of `model_expression`.

    Returns
    -------
    predicted : ndarray
        Array of predicted values.

    """
    with log_start_finish('_FakeRegressionResults prediction', logger):
        model_design = dmatrix(
            self._rhs, data=data, return_type='dataframe')
        return model_design.dot(self.params).values
[ "def", "predict", "(", "self", ",", "data", ")", ":", "with", "log_start_finish", "(", "'_FakeRegressionResults prediction'", ",", "logger", ")", ":", "model_design", "=", "dmatrix", "(", "self", ".", "_rhs", ",", "data", "=", "data", ",", "return_type", "=", "'dataframe'", ")", "return", "model_design", ".", "dot", "(", "self", ".", "params", ")", ".", "values" ]
Predict new values by running data through the fit model.

Parameters
----------
data : pandas.DataFrame
    Table with columns corresponding to the RHS of `model_expression`.

Returns
-------
predicted : ndarray
    Array of predicted values.
[ "Predict", "new", "values", "by", "running", "data", "through", "the", "fit", "model", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L157-L175
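A sketch of the core of this method: build the patsy design matrix for the stored right-hand side and dot it with the saved coefficients. The coefficient names and data below are hypothetical.

import pandas as pd
from patsy import dmatrix

params = pd.Series({'Intercept': 1.0, 'sqft': 0.002})   # hypothetical saved coefficients
data = pd.DataFrame({'sqft': [800.0, 1600.0]})

design = dmatrix('sqft', data=data, return_type='dataframe')
print(design.dot(params).values)  # -> array([2.6, 4.2])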
2,999
UDST/urbansim
urbansim/models/regression.py
RegressionModel.from_yaml
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
    """
    Create a RegressionModel instance from a saved YAML configuration.
    Arguments are mutually exclusive.

    Parameters
    ----------
    yaml_str : str, optional
        A YAML string from which to load model.
    str_or_buffer : str or file like, optional
        File name or buffer from which to load YAML.

    Returns
    -------
    RegressionModel

    """
    cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)

    model = cls(
        cfg['fit_filters'], cfg['predict_filters'],
        cfg['model_expression'], YTRANSFORM_MAPPING[cfg['ytransform']],
        cfg['name'])

    if 'fitted' in cfg and cfg['fitted']:
        fit_parameters = pd.DataFrame(cfg['fit_parameters'])
        fit_parameters.rsquared = cfg['fit_rsquared']
        fit_parameters.rsquared_adj = cfg['fit_rsquared_adj']

        model.model_fit = _FakeRegressionResults(
            model.str_model_expression,
            fit_parameters,
            cfg['fit_rsquared'], cfg['fit_rsquared_adj'])
        model.fit_parameters = fit_parameters

    logger.debug('loaded regression model {} from YAML'.format(model.name))
    return model
python
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
    """
    Create a RegressionModel instance from a saved YAML configuration.
    Arguments are mutually exclusive.

    Parameters
    ----------
    yaml_str : str, optional
        A YAML string from which to load model.
    str_or_buffer : str or file like, optional
        File name or buffer from which to load YAML.

    Returns
    -------
    RegressionModel

    """
    cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)

    model = cls(
        cfg['fit_filters'], cfg['predict_filters'],
        cfg['model_expression'], YTRANSFORM_MAPPING[cfg['ytransform']],
        cfg['name'])

    if 'fitted' in cfg and cfg['fitted']:
        fit_parameters = pd.DataFrame(cfg['fit_parameters'])
        fit_parameters.rsquared = cfg['fit_rsquared']
        fit_parameters.rsquared_adj = cfg['fit_rsquared_adj']

        model.model_fit = _FakeRegressionResults(
            model.str_model_expression,
            fit_parameters,
            cfg['fit_rsquared'], cfg['fit_rsquared_adj'])
        model.fit_parameters = fit_parameters

    logger.debug('loaded regression model {} from YAML'.format(model.name))
    return model
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ")", ":", "cfg", "=", "yamlio", ".", "yaml_to_dict", "(", "yaml_str", ",", "str_or_buffer", ")", "model", "=", "cls", "(", "cfg", "[", "'fit_filters'", "]", ",", "cfg", "[", "'predict_filters'", "]", ",", "cfg", "[", "'model_expression'", "]", ",", "YTRANSFORM_MAPPING", "[", "cfg", "[", "'ytransform'", "]", "]", ",", "cfg", "[", "'name'", "]", ")", "if", "'fitted'", "in", "cfg", "and", "cfg", "[", "'fitted'", "]", ":", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "cfg", "[", "'fit_parameters'", "]", ")", "fit_parameters", ".", "rsquared", "=", "cfg", "[", "'fit_rsquared'", "]", "fit_parameters", ".", "rsquared_adj", "=", "cfg", "[", "'fit_rsquared_adj'", "]", "model", ".", "model_fit", "=", "_FakeRegressionResults", "(", "model", ".", "str_model_expression", ",", "fit_parameters", ",", "cfg", "[", "'fit_rsquared'", "]", ",", "cfg", "[", "'fit_rsquared_adj'", "]", ")", "model", ".", "fit_parameters", "=", "fit_parameters", "logger", ".", "debug", "(", "'loaded regression model {} from YAML'", ".", "format", "(", "model", ".", "name", ")", ")", "return", "model" ]
Create a RegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.

Parameters
----------
yaml_str : str, optional
    A YAML string from which to load model.
str_or_buffer : str or file like, optional
    File name or buffer from which to load YAML.

Returns
-------
RegressionModel
[ "Create", "a", "RegressionModel", "instance", "from", "a", "saved", "YAML", "configuration", ".", "Arguments", "are", "mutually", "exclusive", "." ]
79f815a6503e109f50be270cee92d0f4a34f49ef
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L260-L298
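A hedged sketch of loading an unfitted RegressionModel from YAML, using only the keys that from_yaml reads above. The field values are invented, and `ytransform: null` assumes the YTRANSFORM_MAPPING lookup treats null as "no transform".

from urbansim.models.regression import RegressionModel  # assumed import path

yaml_str = """
name: hedonic_price
model_expression: price ~ sqft + dist_to_cbd
fit_filters:
- sqft > 0
predict_filters: null
ytransform: null
"""

model = RegressionModel.from_yaml(yaml_str=yaml_str)
print(model.name)
print(model.str_model_expression)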