Dataset schema (one record per function; field name, type, observed size range):

    repository_name              string   5 - 67 chars
    func_path_in_repository      string   4 - 234 chars
    func_name                    string   0 - 314 chars
    whole_func_string            string   52 - 3.87M chars
    language                     string   6 classes
    func_code_string             string   39 - 1.84M chars
    func_code_tokens             list     15 - 672k items
    func_documentation_string    string   1 - 47.2k chars
    func_documentation_tokens    list     1 - 3.92k items
    split_name                   string   1 class
    func_code_url                string   85 - 339 chars
repository_name: tcalmant/python-javaobj
func_path_in_repository: javaobj/core.py
func_name: JavaObjectMarshaller._convert_type_to_char
whole_func_string:

    def _convert_type_to_char(self, type_char):
        """
        Converts the given type code to an int

        :param type_char: A type code character
        """
        typecode = type_char
        if type(type_char) is int:
            typecode = chr(type_char)

        if typecode in self.TYPECODES_LIST:
            return ord(typecode)
        elif len(typecode) > 1:
            if typecode[0] == "L":
                return ord(self.TYPE_OBJECT)
            elif typecode[0] == "[":
                return ord(self.TYPE_ARRAY)

        raise RuntimeError(
            "Typecode {0} ({1}) isn't supported.".format(type_char, typecode)
        )
language: python
split_name: train
func_code_url: https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1640-L1660
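The mapping above is easy to exercise on its own. Here is a minimal, runnable sketch; the typecode constants are illustrative stand-ins for the marshaller's class attributes, patterned on JVM field-descriptor conventions:

    # Hypothetical module-level stand-ins for self.TYPECODES_LIST,
    # self.TYPE_OBJECT and self.TYPE_ARRAY (JVM field-descriptor style).
    TYPECODES_LIST = ["B", "C", "D", "F", "I", "J", "S", "Z"]
    TYPE_OBJECT = "L"
    TYPE_ARRAY = "["

    def convert_type_to_char(type_char):
        typecode = type_char
        if isinstance(type_char, int):
            typecode = chr(type_char)  # ints are normalized to characters
        if typecode in TYPECODES_LIST:
            return ord(typecode)         # a plain primitive code
        elif len(typecode) > 1:
            if typecode[0] == "L":
                return ord(TYPE_OBJECT)  # "Ljava/lang/String;" -> object
            elif typecode[0] == "[":
                return ord(TYPE_ARRAY)   # "[I" -> array
        raise RuntimeError("unsupported typecode: %r" % (type_char,))

    print(convert_type_to_char("I"))                   # 73
    print(convert_type_to_char(ord("Z")))              # 90
    print(convert_type_to_char("Ljava/lang/String;"))  # 76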
repository_name: tcalmant/python-javaobj
func_path_in_repository: javaobj/core.py
func_name: DefaultObjectTransformer.create
whole_func_string:

    def create(self, classdesc, unmarshaller=None):
        # type: (JavaClass, JavaObjectUnmarshaller) -> JavaObject
        """
        Transforms a deserialized Java object into a Python object

        :param classdesc: The description of a Java class
        :return: The Python form of the object, or the original JavaObject
        """
        try:
            mapped_type = self.TYPE_MAPPER[classdesc.name]
        except KeyError:
            # Return a JavaObject by default
            return JavaObject()
        else:
            log_debug("---")
            log_debug(classdesc.name)
            log_debug("---")

            java_object = mapped_type(unmarshaller)

            log_debug(">>> java_object: {0}".format(java_object))
            return java_object
language: python
split_name: train
func_code_url: https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L2002-L2023
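The try/except dispatch in create() can be seen end to end with a toy mapper; the class names and mapped types below are hypothetical, not the library's actual TYPE_MAPPER contents:

    # Toy dispatch mirroring DefaultObjectTransformer.create().
    class JavaObject(object):
        """Generic fallback wrapper for unknown Java classes."""

    class JavaMap(dict):
        def __init__(self, unmarshaller=None):
            super(JavaMap, self).__init__()

    TYPE_MAPPER = {"java.util.HashMap": JavaMap}  # hypothetical entry

    def create(classdesc_name, unmarshaller=None):
        try:
            mapped_type = TYPE_MAPPER[classdesc_name]
        except KeyError:
            return JavaObject()  # unknown classes fall back to the generic type
        return mapped_type(unmarshaller)

    print(type(create("java.util.HashMap")).__name__)  # JavaMap
    print(type(create("com.example.Foo")).__name__)    # JavaObject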
repository_name: UDST/urbansim
func_path_in_repository: urbansim/utils/logutil.py
func_name: log_start_finish
whole_func_string:

    @contextlib.contextmanager  # required for the "with log_start_finish(...)" usage
    def log_start_finish(msg, logger, level=logging.DEBUG):
        """
        A context manager to log messages with "start: " and "finish: "
        prefixes before and after a block.

        Parameters
        ----------
        msg : str
            Will be prefixed with "start: " and "finish: ".
        logger : logging.Logger
        level : int, optional
            Level at which to log, passed to ``logger.log``.

        """
        logger.log(level, 'start: ' + msg)
        yield
        logger.log(level, 'finish: ' + msg)
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/logutil.py#L11-L27
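Typical usage of the context manager, assuming the decorator shown above and an installed urbansim:

    import logging
    from urbansim.utils.logutil import log_start_finish

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('example')

    with log_start_finish('loading households table', logger):
        pass  # work goes here; 'start: ...' and 'finish: ...' bracket it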
repository_name: UDST/urbansim
func_path_in_repository: urbansim/utils/logutil.py
func_name: log_to_file
whole_func_string:

    def log_to_file(filename, level=None, fmt=None, datefmt=None):
        """
        Send log output to the given file.

        Parameters
        ----------
        filename : str
        level : int, optional
            An optional logging level that will apply only to this stream
            handler.
        fmt : str, optional
            An optional format string that will be used for the log messages.
        datefmt : str, optional
            An optional format string for formatting dates in the log messages.

        """
        # NB: `level` is documented above but not forwarded in this call,
        # so as written it has no effect on the handler.
        _add_urbansim_handler(
            logging.FileHandler(filename), fmt=fmt, datefmt=datefmt)
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/logutil.py#L104-L123
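A short usage sketch. It assumes, as the module's other helpers suggest, that _add_urbansim_handler attaches the handler to urbansim's package logger, so messages logged under that namespace land in the file:

    import logging
    from urbansim.utils import logutil

    logutil.log_to_file('simulation.log')
    logging.getLogger('urbansim.models').warning('this ends up in simulation.log')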
repository_name: UDST/urbansim
func_path_in_repository: urbansim/urbanchoice/mnl.py
func_name: mnl_simulate
whole_func_string:

    def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True):
        """
        Get the probabilities for each chooser choosing between `numalts`
        alternatives.

        Parameters
        ----------
        data : 2D array
            The data are expected to be in "long" form where each row is for
            one alternative. Alternatives are in groups of `numalts` rows per
            chooser. Alternatives must be in the same order for each chooser.
        coeff : 1D array
            The model coefficients corresponding to each column in `data`.
        numalts : int
            The number of alternatives available to each chooser.
        GPU : bool, optional
        returnprobs : bool, optional
            If True, return the probabilities for each chooser/alternative
            instead of actual choices.

        Returns
        -------
        probs or choices : 2D array
            If `returnprobs` is True the probabilities are a 2D array with a
            row for each chooser and columns for each alternative.

        """
        logger.debug(
            'start: MNL simulation with len(data)={} and numalts={}'.format(
                len(data), numalts))
        atype = 'numpy' if not GPU else 'cuda'

        data = np.transpose(data)
        coeff = np.reshape(np.array(coeff), (1, len(coeff)))

        data, coeff = PMAT(data, atype), PMAT(coeff, atype)

        probs = mnl_probs(data, coeff, numalts)

        if returnprobs:
            return np.transpose(probs.get_mat())

        # convert to cpu from here on - gpu doesn't currently support these ops
        if probs.typ == 'cuda':
            probs = PMAT(probs.get_mat())

        probs = probs.cumsum(axis=0)
        r = pmat.random(probs.size() // numalts)
        choices = probs.subtract(r, inplace=True).firstpositive(axis=0)

        logger.debug('finish: MNL simulation')
        return choices.get_mat()
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/urbanchoice/mnl.py#L124-L175
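For reference, this is what mnl_simulate computes when returnprobs=True, sketched in plain numpy for a tiny long-form input (2 choosers x 3 alternatives, 2 variables). mnl_probs itself runs through PMAT, so this is only a reference implementation of the math, not the library code path:

    import numpy as np

    numalts = 3
    data = np.array([[1.0, 0.0],   # chooser 0, alternative 0
                     [0.0, 1.0],   # chooser 0, alternative 1
                     [1.0, 1.0],   # chooser 0, alternative 2
                     [0.5, 0.5],   # chooser 1, alternative 0
                     [2.0, 0.0],   # chooser 1, alternative 1
                     [0.0, 2.0]])  # chooser 1, alternative 2
    coeff = np.array([0.8, -0.3])

    utility = (data @ coeff).reshape(-1, numalts)  # one row per chooser
    expu = np.exp(utility - utility.max(axis=1, keepdims=True))
    probs = expu / expu.sum(axis=1, keepdims=True)  # softmax: rows sum to 1

    print(probs.shape)  # (2, 3): a row per chooser, a column per alternative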
repository_name: UDST/urbansim
func_path_in_repository: urbansim/urbanchoice/mnl.py
func_name: mnl_estimate
whole_func_string:

    def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3),
                     weights=None, lcgrad=False, beta=None):
        """
        Calculate coefficients of the MNL model.

        Parameters
        ----------
        data : 2D array
            The data are expected to be in "long" form where each row is for
            one alternative. Alternatives are in groups of `numalts` rows per
            chooser. Alternatives must be in the same order for each chooser.
        chosen : 2D array
            This boolean array has a row for each chooser and a column for
            each alternative. The column ordering for alternatives is expected
            to be the same as their row ordering in the `data` array.
            A one (True) indicates which alternative each chooser has chosen.
        numalts : int
            The number of alternatives.
        GPU : bool, optional
        coeffrange : tuple of floats, optional
            Limits of (min, max) to which coefficients are clipped.
        weights : ndarray, optional
        lcgrad : bool, optional
        beta : 1D array, optional
            Any initial guess for the coefficients.

        Returns
        -------
        log_likelihood : dict
            Dictionary of log-likelihood values describing the quality of
            the model fit.
        fit_parameters : pandas.DataFrame
            Table of fit parameters with columns 'Coefficient', 'Std. Error',
            'T-Score'. Each row corresponds to a column in `data`; rows are
            given in the same order as the columns in `data`.

        See Also
        --------
        scipy.optimize.fmin_l_bfgs_b : The optimization routine used.

        """
        logger.debug(
            'start: MNL fit with len(data)={} and numalts={}'.format(
                len(data), numalts))
        atype = 'numpy' if not GPU else 'cuda'

        numvars = data.shape[1]
        numobs = data.shape[0] // numalts

        if chosen is None:
            chosen = np.ones((numobs, numalts))  # used for latent classes

        data = np.transpose(data)
        chosen = np.transpose(chosen)
        data, chosen = PMAT(data, atype), PMAT(chosen, atype)

        if weights is not None:
            weights = PMAT(np.transpose(weights), atype)

        if beta is None:
            beta = np.zeros(numvars)
        bounds = [coeffrange] * numvars

        with log_start_finish('scipy optimization for MNL fit', logger):
            args = (data, chosen, numalts, weights, lcgrad)
            bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik,
                                                       beta,
                                                       args=args,
                                                       fprime=None,
                                                       factr=10,
                                                       approx_grad=False,
                                                       bounds=bounds)

        if bfgs_result[2]['warnflag'] > 0:
            logger.warn("mnl did not converge correctly: %s", bfgs_result)

        beta = bfgs_result[0]
        stderr = mnl_loglik(
            beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad)

        l0beta = np.zeros(numvars)
        l0 = -1 * mnl_loglik(l0beta, *args)[0]
        l1 = -1 * mnl_loglik(beta, *args)[0]

        log_likelihood = {
            'null': float(l0[0][0]),
            'convergence': float(l1[0][0]),
            'ratio': float((1 - (l1 / l0))[0][0])
        }

        fit_parameters = pd.DataFrame({
            'Coefficient': beta,
            'Std. Error': stderr,
            'T-Score': beta / stderr})

        logger.debug('finish: MNL fit')

        return log_likelihood, fit_parameters
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/urbanchoice/mnl.py#L178-L275
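The 'ratio' entry built above is the likelihood-ratio index 1 - LL(beta)/LL(0), i.e. McFadden's pseudo R-squared; a worked example with made-up log-likelihoods:

    l0, l1 = -1000.0, -850.0  # null and at-convergence log-likelihoods
    print(1 - (l1 / l0))      # 0.15; higher means a better fit than the null model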
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: unit_choice
whole_func_string:

    def unit_choice(chooser_ids, alternative_ids, probabilities):
        """
        Have a set of choosers choose from among alternatives according
        to a probability distribution. Choice is binary: each
        alternative can only be chosen once.

        Parameters
        ----------
        chooser_ids : 1d array_like
            Array of IDs of the agents that are making choices.
        alternative_ids : 1d array_like
            Array of IDs of alternatives among which agents are making choices.
        probabilities : 1d array_like
            The probability that an agent will choose an alternative.
            Must be the same shape as `alternative_ids`. Unavailable
            alternatives should have a probability of 0.

        Returns
        -------
        choices : pandas.Series
            Mapping of chooser ID to alternative ID. Some choosers
            will map to a nan value when there are not enough alternatives
            for all the choosers.

        """
        chooser_ids = np.asanyarray(chooser_ids)
        alternative_ids = np.asanyarray(alternative_ids)
        probabilities = np.asanyarray(probabilities)

        logger.debug(
            'start: unit choice with {} choosers and {} alternatives'.format(
                len(chooser_ids), len(alternative_ids)))

        choices = pd.Series(index=chooser_ids)

        if probabilities.sum() == 0:
            # return all nan if there are no available units
            return choices

        # probabilities need to sum to 1 for np.random.choice
        probabilities = probabilities / probabilities.sum()

        # need to see if there are as many available alternatives as choosers
        n_available = np.count_nonzero(probabilities)
        n_choosers = len(chooser_ids)
        n_to_choose = n_choosers if n_choosers < n_available else n_available

        chosen = np.random.choice(
            alternative_ids, size=n_to_choose, replace=False, p=probabilities)

        # if there are fewer available units than choosers we need to pick
        # which choosers get a unit
        if n_to_choose == n_available:
            chooser_ids = np.random.choice(
                chooser_ids, size=n_to_choose, replace=False)

        choices[chooser_ids] = chosen

        logger.debug('finish: unit choice')
        return choices
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L26-L85
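A small end-to-end example of unit_choice: three choosers compete for four units, one of which (id 13) is unavailable. The seed is only for reproducibility:

    import numpy as np
    from urbansim.models.dcm import unit_choice

    np.random.seed(0)
    choices = unit_choice(
        chooser_ids=[1, 2, 3],
        alternative_ids=[10, 11, 12, 13],
        probabilities=[0.25, 0.25, 0.5, 0.0])
    print(choices)  # Series mapping chooser id -> alternative id, no repeats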
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.from_yaml
whole_func_string:

    @classmethod
    def from_yaml(cls, yaml_str=None, str_or_buffer=None):
        """
        Create a DiscreteChoiceModel instance from a saved YAML configuration.
        Arguments are mutually exclusive.

        Parameters
        ----------
        yaml_str : str, optional
            A YAML string from which to load model.
        str_or_buffer : str or file like, optional
            File name or buffer from which to load YAML.

        Returns
        -------
        MNLDiscreteChoiceModel

        """
        cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)

        model = cls(
            cfg['model_expression'],
            cfg['sample_size'],
            probability_mode=cfg.get('probability_mode', 'full_product'),
            choice_mode=cfg.get('choice_mode', 'individual'),
            choosers_fit_filters=cfg.get('choosers_fit_filters', None),
            choosers_predict_filters=cfg.get('choosers_predict_filters', None),
            alts_fit_filters=cfg.get('alts_fit_filters', None),
            alts_predict_filters=cfg.get('alts_predict_filters', None),
            interaction_predict_filters=cfg.get(
                'interaction_predict_filters', None),
            estimation_sample_size=cfg.get('estimation_sample_size', None),
            prediction_sample_size=cfg.get('prediction_sample_size', None),
            choice_column=cfg.get('choice_column', None),
            name=cfg.get('name', None)
        )

        if cfg.get('log_likelihoods', None):
            model.log_likelihoods = cfg['log_likelihoods']
        if cfg.get('fit_parameters', None):
            model.fit_parameters = pd.DataFrame(cfg['fit_parameters'])

        logger.debug('loaded LCM model {} from YAML'.format(model.name))
        return model
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L278-L320
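A minimal round-trip the loader above will accept; the expression and values are illustrative, not a shipped configuration (omitted keys fall back to the cfg.get() defaults):

    from urbansim.models.dcm import MNLDiscreteChoiceModel

    yaml_cfg = """
    model_expression: np.log1p(sqft_per_unit) + income
    sample_size: 50
    probability_mode: full_product
    choice_mode: individual
    name: example_lcm
    """
    model = MNLDiscreteChoiceModel.from_yaml(yaml_str=yaml_cfg)
    print(model.name)  # 'example_lcm'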
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.apply_fit_filters
whole_func_string:

    def apply_fit_filters(self, choosers, alternatives):
        """
        Filter `choosers` and `alternatives` for fitting.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing,
            e.g. buildings.

        Returns
        -------
        filtered_choosers, filtered_alts : pandas.DataFrame

        """
        return super(MNLDiscreteChoiceModel, self).apply_fit_filters(
            choosers, alternatives)
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L331-L349
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.apply_predict_filters
whole_func_string:

    def apply_predict_filters(self, choosers, alternatives):
        """
        Filter `choosers` and `alternatives` for prediction.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing,
            e.g. buildings.

        Returns
        -------
        filtered_choosers, filtered_alts : pandas.DataFrame

        """
        return super(MNLDiscreteChoiceModel, self).apply_predict_filters(
            choosers, alternatives)
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L351-L369
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.fit
whole_func_string:

    def fit(self, choosers, alternatives, current_choice):
        """
        Fit and save model parameters based on given data.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing,
            e.g. buildings.
        current_choice : pandas.Series or any
            A Series describing the `alternatives` currently chosen
            by the `choosers`. Should have an index matching `choosers`
            and values matching the index of `alternatives`.
            If a non-Series is given it should be a column in `choosers`.

        Returns
        -------
        log_likelihoods : dict
            Dict of log-likelihood values describing the quality of the
            model fit. Will have keys 'null', 'convergence', and 'ratio'.

        """
        logger.debug('start: fit LCM model {}'.format(self.name))

        if not isinstance(current_choice, pd.Series):
            current_choice = choosers[current_choice]

        choosers, alternatives = self.apply_fit_filters(choosers, alternatives)

        if self.estimation_sample_size:
            choosers = choosers.loc[np.random.choice(
                choosers.index,
                min(self.estimation_sample_size, len(choosers)),
                replace=False)]

        current_choice = current_choice.loc[choosers.index]

        _, merged, chosen = interaction.mnl_interaction_dataset(
            choosers, alternatives, self.sample_size, current_choice)

        model_design = dmatrix(
            self.str_model_expression, data=merged, return_type='dataframe')

        if len(merged) != model_design.as_matrix().shape[0]:
            raise ModelEvaluationError(
                'Estimated data does not have the same length as input. '
                'This suggests there are null values in one or more of '
                'the input columns.')

        self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
            model_design.as_matrix(), chosen, self.sample_size)
        self.fit_parameters.index = model_design.columns

        logger.debug('finish: fit LCM model {}'.format(self.name))
        return self.log_likelihoods
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L371-L427
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.report_fit
whole_func_string:

    def report_fit(self):
        """
        Print a report of the fit results.

        """
        if not self.fitted:
            print('Model not yet fit.')
            return

        print('Null Log-likelihood: {0:.3f}'.format(
            self.log_likelihoods['null']))
        print('Log-likelihood at convergence: {0:.3f}'.format(
            self.log_likelihoods['convergence']))
        print('Log-likelihood Ratio: {0:.3f}\n'.format(
            self.log_likelihoods['ratio']))

        tbl = PrettyTable()
        tbl.add_column('Component', self.fit_parameters.index.values)
        for col in ('Coefficient', 'Std. Error', 'T-Score'):
            tbl.add_column(col, self.fit_parameters[col].values)

        tbl.align['Component'] = 'l'
        tbl.float_format = '.3'

        print(tbl)
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L445-L472
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.probabilities
whole_func_string:

    def probabilities(self, choosers, alternatives, filter_tables=True):
        """
        Returns the probabilities for a set of choosers to choose
        from among a set of alternatives.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing.
        filter_tables : bool, optional
            If True, filter `choosers` and `alternatives` with prediction
            filters before calculating probabilities.

        Returns
        -------
        probabilities : pandas.Series
            Probability of selection associated with each chooser
            and alternative. Index will be a MultiIndex with alternative
            IDs in the inner index and chooser IDs in the outer index.

        """
        logger.debug('start: calculate probabilities for LCM model {}'.format(
            self.name))
        self.assert_fitted()

        if filter_tables:
            choosers, alternatives = self.apply_predict_filters(
                choosers, alternatives)

        if self.prediction_sample_size is not None:
            sample_size = self.prediction_sample_size
        else:
            sample_size = len(alternatives)

        if self.probability_mode == 'single_chooser':
            _, merged, _ = interaction.mnl_interaction_dataset(
                choosers.head(1), alternatives, sample_size)
        elif self.probability_mode == 'full_product':
            _, merged, _ = interaction.mnl_interaction_dataset(
                choosers, alternatives, sample_size)
        else:
            raise ValueError(
                'Unrecognized probability_mode option: {}'.format(
                    self.probability_mode))

        merged = util.apply_filter_query(
            merged, self.interaction_predict_filters)
        model_design = dmatrix(
            self.str_model_expression, data=merged, return_type='dataframe')

        if len(merged) != model_design.as_matrix().shape[0]:
            raise ModelEvaluationError(
                'Simulated data does not have the same length as input. '
                'This suggests there are null values in one or more of '
                'the input columns.')

        # get the order of the coefficients in the same order as the
        # columns in the design matrix
        coeffs = [self.fit_parameters['Coefficient'][x]
                  for x in model_design.columns]

        # probabilities are returned from mnl_simulate as a 2d array
        # with choosers along rows and alternatives along columns
        if self.probability_mode == 'single_chooser':
            numalts = len(merged)
        else:
            numalts = sample_size

        probabilities = mnl.mnl_simulate(
            model_design.as_matrix(),
            coeffs,
            numalts=numalts, returnprobs=True)

        # want to turn probabilities into a Series with a MultiIndex
        # of chooser IDs and alternative IDs.
        # indexing by chooser ID will get you the probabilities
        # across alternatives for that chooser
        mi = pd.MultiIndex.from_arrays(
            [merged['join_index'].values, merged.index.values],
            names=('chooser_id', 'alternative_id'))
        probabilities = pd.Series(probabilities.flatten(), index=mi)

        logger.debug('finish: calculate probabilities for LCM model {}'.format(
            self.name))
        return probabilities
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L474-L560
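The returned Series is keyed (chooser_id, alternative_id), so slicing by a chooser ID yields that chooser's distribution over alternatives. A standalone illustration with fabricated values:

    import pandas as pd

    mi = pd.MultiIndex.from_arrays(
        [[1, 1, 2, 2], [10, 11, 10, 11]],
        names=('chooser_id', 'alternative_id'))
    probs = pd.Series([0.7, 0.3, 0.4, 0.6], index=mi)

    print(probs.loc[1])        # chooser 1's probabilities across alternatives
    print(probs.loc[1].sum())  # 1.0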
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.summed_probabilities
whole_func_string:

    def summed_probabilities(self, choosers, alternatives):
        """
        Calculate total probability associated with each alternative.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing.

        Returns
        -------
        probs : pandas.Series
            Total probability associated with each alternative.

        """
        def normalize(s):
            return s / s.sum()

        choosers, alternatives = self.apply_predict_filters(
            choosers, alternatives)
        probs = self.probabilities(choosers, alternatives, filter_tables=False)

        # group by the alternative IDs and sum
        if self.probability_mode == 'single_chooser':
            return (
                normalize(probs) * len(choosers)
            ).reset_index(level=0, drop=True)
        elif self.probability_mode == 'full_product':
            return probs.groupby(level=0).apply(normalize)\
                .groupby(level=1).sum()
        else:
            raise ValueError(
                'Unrecognized probability_mode option: {}'.format(
                    self.probability_mode))
language: python
split_name: train
func_code_url: https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L562-L597
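A standalone sketch of the 'full_product' branch above, with fabricated values; groupby(...).transform('sum') is an equivalent way to write the per-chooser normalization that avoids pandas-version quirks of groupby().apply():

    import pandas as pd

    mi = pd.MultiIndex.from_arrays(
        [[1, 1, 2, 2], [10, 11, 10, 11]],
        names=('chooser_id', 'alternative_id'))
    probs = pd.Series([2.0, 2.0, 1.0, 3.0], index=mi)

    # normalize within each chooser, then total per alternative
    normalized = probs / probs.groupby(level=0).transform('sum')
    print(normalized.groupby(level=1).sum())
    # alternative 10: 0.50 + 0.25 = 0.75; alternative 11: 0.50 + 0.75 = 1.25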
repository_name: UDST/urbansim
func_path_in_repository: urbansim/models/dcm.py
func_name: MNLDiscreteChoiceModel.predict
whole_func_string:

    def predict(self, choosers, alternatives, debug=False):
        """
        Choose from among alternatives for a group of agents.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing.
        debug : bool
            If True, the probabilities underlying the simulated choices
            are stored on the object as the attribute "sim_pdf".

        Returns
        -------
        choices : pandas.Series
            Mapping of chooser ID to alternative ID. Some choosers
            will map to a nan value when there are not enough alternatives
            for all the choosers.

        """
        self.assert_fitted()
        logger.debug('start: predict LCM model {}'.format(self.name))

        choosers, alternatives = self.apply_predict_filters(
            choosers, alternatives)

        if len(choosers) == 0:
            return pd.Series()

        if len(alternatives) == 0:
            return pd.Series(index=choosers.index)

        probabilities = self.probabilities(
            choosers, alternatives, filter_tables=False)

        if debug:
            self.sim_pdf = probabilities

        if self.choice_mode == 'aggregate':
            choices = unit_choice(
                choosers.index.values,
                probabilities.index.get_level_values('alternative_id').values,
                probabilities.values)
        elif self.choice_mode == 'individual':
            def mkchoice(probs):
                probs.reset_index(0, drop=True, inplace=True)
                return np.random.choice(
                    probs.index.values, p=probs.values / probs.sum())
            choices = probabilities.groupby(level='chooser_id', sort=False)\
                .apply(mkchoice)
        else:
            raise ValueError(
                'Unrecognized choice_mode option: {}'.format(self.choice_mode))

        logger.debug('finish: predict LCM model {}'.format(self.name))
        return choices
language: python
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L599-L657
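A hedged usage sketch for the predict record above. The tables, IDs, and the 'price' model expression are invented; the constructor and fit signatures are inferred from the fit_from_cfg and add_model_from_params records elsewhere in this file.

import pandas as pd
from urbansim.models.dcm import MNLDiscreteChoiceModel

# Toy data: three choosers, four alternatives (all values made up).
choosers = pd.DataFrame({'income': [40.0, 70.0, 100.0]}, index=[1, 2, 3])
alts = pd.DataFrame({'price': [10.0, 20.0, 30.0, 40.0]}, index=[10, 11, 12, 13])

m = MNLDiscreteChoiceModel('price', sample_size=4)
m.fit(choosers, alts, pd.Series([10, 11, 12], index=choosers.index))
choices = m.predict(choosers, alts, debug=True)  # chooser ID -> alternative ID
# Because debug=True, m.sim_pdf now stores the prediction probabilities.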
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.to_dict
def to_dict(self): """ Return a dict representation of an MNLDiscreteChoiceModel instance. """ return { 'model_type': 'discretechoice', 'model_expression': self.model_expression, 'sample_size': self.sample_size, 'name': self.name, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'fitted': self.fitted, 'log_likelihoods': self.log_likelihoods, 'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters) if self.fitted else None) }
python
def to_dict(self): return { 'model_type': 'discretechoice', 'model_expression': self.model_expression, 'sample_size': self.sample_size, 'name': self.name, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'fitted': self.fitted, 'log_likelihoods': self.log_likelihoods, 'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters) if self.fitted else None) }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'model_type'", ":", "'discretechoice'", ",", "'model_expression'", ":", "self", ".", "model_expression", ",", "'sample_size'", ":", "self", ".", "sample_size", ",", "'name'", ":", "self", ".", "name", ",", "'probability_mode'", ":", "self", ".", "probability_mode", ",", "'choice_mode'", ":", "self", ".", "choice_mode", ",", "'choosers_fit_filters'", ":", "self", ".", "choosers_fit_filters", ",", "'choosers_predict_filters'", ":", "self", ".", "choosers_predict_filters", ",", "'alts_fit_filters'", ":", "self", ".", "alts_fit_filters", ",", "'alts_predict_filters'", ":", "self", ".", "alts_predict_filters", ",", "'interaction_predict_filters'", ":", "self", ".", "interaction_predict_filters", ",", "'estimation_sample_size'", ":", "self", ".", "estimation_sample_size", ",", "'prediction_sample_size'", ":", "self", ".", "prediction_sample_size", ",", "'choice_column'", ":", "self", ".", "choice_column", ",", "'fitted'", ":", "self", ".", "fitted", ",", "'log_likelihoods'", ":", "self", ".", "log_likelihoods", ",", "'fit_parameters'", ":", "(", "yamlio", ".", "frame_to_yaml_safe", "(", "self", ".", "fit_parameters", ")", "if", "self", ".", "fitted", "else", "None", ")", "}" ]
Return a dict representation of an MNLDiscreteChoiceModel instance.
[ "Return", "a", "dict", "respresentation", "of", "an", "MNLDiscreteChoiceModel", "instance", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L659-L684
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.to_yaml
def to_yaml(self, str_or_buffer=None): """ Save a model representation to YAML. Parameters ---------- str_or_buffer : str or file like, optional By default a YAML string is returned. If a string is given here the YAML will be written to that file. If an object with a ``.write`` method is given the YAML will be written to that object. Returns ------- j : str YAML as a string if `str_or_buffer` is not given. """ logger.debug('serializing LCM model {} to YAML'.format(self.name)) if (not isinstance(self.probability_mode, str) or not isinstance(self.choice_mode, str)): raise TypeError( 'Cannot serialize model with non-string probability_mode ' 'or choice_mode attributes.') return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
python
def to_yaml(self, str_or_buffer=None): logger.debug('serializing LCM model {} to YAML'.format(self.name)) if (not isinstance(self.probability_mode, str) or not isinstance(self.choice_mode, str)): raise TypeError( 'Cannot serialize model with non-string probability_mode ' 'or choice_mode attributes.') return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
[ "def", "to_yaml", "(", "self", ",", "str_or_buffer", "=", "None", ")", ":", "logger", ".", "debug", "(", "'serializing LCM model {} to YAML'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "(", "not", "isinstance", "(", "self", ".", "probability_mode", ",", "str", ")", "or", "not", "isinstance", "(", "self", ".", "choice_mode", ",", "str", ")", ")", ":", "raise", "TypeError", "(", "'Cannot serialize model with non-string probability_mode '", "'or choice_mode attributes.'", ")", "return", "yamlio", ".", "convert_to_yaml", "(", "self", ".", "to_dict", "(", ")", ",", "str_or_buffer", ")" ]
Save a model representation to YAML. Parameters ---------- str_or_buffer : str or file like, optional By default a YAML string is returned. If a string is given here the YAML will be written to that file. If an object with a ``.write`` method is given the YAML will be written to that object. Returns ------- j : str YAML as a string if `str_or_buffer` is not given.
[ "Save", "a", "model", "respresentation", "to", "YAML", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L686-L710
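A round-trip sketch tying the to_dict/to_yaml records above to from_yaml (documented later in this file for the segmented class and called the same way here). The 'price' expression and model name are placeholders, and the name keyword is assumed from the constructor call shown in add_model_from_params.

from urbansim.models.dcm import MNLDiscreteChoiceModel

m = MNLDiscreteChoiceModel('price', sample_size=4, name='toy_lcm')
yaml_str = m.to_yaml()  # no str_or_buffer given, so a YAML string is returned
m2 = MNLDiscreteChoiceModel.from_yaml(yaml_str)
assert m2.to_dict()['model_type'] == 'discretechoice'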
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.choosers_columns_used
def choosers_columns_used(self): """ Columns from the choosers table that are used for filtering. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.choosers_predict_filters), util.columns_in_filters(self.choosers_fit_filters))))
python
def choosers_columns_used(self): return list(tz.unique(tz.concatv( util.columns_in_filters(self.choosers_predict_filters), util.columns_in_filters(self.choosers_fit_filters))))
[ "def", "choosers_columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "util", ".", "columns_in_filters", "(", "self", ".", "choosers_predict_filters", ")", ",", "util", ".", "columns_in_filters", "(", "self", ".", "choosers_fit_filters", ")", ")", ")", ")" ]
Columns from the choosers table that are used for filtering.
[ "Columns", "from", "the", "choosers", "table", "that", "are", "used", "for", "filtering", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L712-L719
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.alts_columns_used
def alts_columns_used(self): """ Columns from the alternatives table that are used for filtering. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.alts_predict_filters), util.columns_in_filters(self.alts_fit_filters))))
python
def alts_columns_used(self): return list(tz.unique(tz.concatv( util.columns_in_filters(self.alts_predict_filters), util.columns_in_filters(self.alts_fit_filters))))
[ "def", "alts_columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "util", ".", "columns_in_filters", "(", "self", ".", "alts_predict_filters", ")", ",", "util", ".", "columns_in_filters", "(", "self", ".", "alts_fit_filters", ")", ")", ")", ")" ]
Columns from the alternatives table that are used for filtering.
[ "Columns", "from", "the", "alternatives", "table", "that", "are", "used", "for", "filtering", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L721-L728
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.interaction_columns_used
def interaction_columns_used(self): """ Columns from the interaction dataset used for filtering and in the model. These may come originally from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.interaction_predict_filters), util.columns_in_formula(self.model_expression))))
python
def interaction_columns_used(self): return list(tz.unique(tz.concatv( util.columns_in_filters(self.interaction_predict_filters), util.columns_in_formula(self.model_expression))))
[ "def", "interaction_columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "util", ".", "columns_in_filters", "(", "self", ".", "interaction_predict_filters", ")", ",", "util", ".", "columns_in_formula", "(", "self", ".", "model_expression", ")", ")", ")", ")" ]
Columns from the interaction dataset used for filtering and in the model. These may come originally from either the choosers or alternatives tables.
[ "Columns", "from", "the", "interaction", "dataset", "used", "for", "filtering", "and", "in", "the", "model", ".", "These", "may", "come", "originally", "from", "either", "the", "choosers", "or", "alternatives", "tables", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L730-L739
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.columns_used
def columns_used(self): """ Columns from any table used in the model. May come from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used())))
python
def columns_used(self): return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used())))
[ "def", "columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "self", ".", "choosers_columns_used", "(", ")", ",", "self", ".", "alts_columns_used", "(", ")", ",", "self", ".", "interaction_columns_used", "(", ")", ")", ")", ")" ]
Columns from any table used in the model. May come from either the choosers or alternatives tables.
[ "Columns", "from", "any", "table", "used", "in", "the", "model", ".", "May", "come", "from", "either", "the", "choosers", "or", "alternatives", "tables", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L741-L750
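The four *_columns_used records above share one toolz idiom: concatenate several column lists and deduplicate while preserving order. A standalone illustration with made-up column names:

import toolz as tz

filter_cols = ['income', 'tenure']
formula_cols = ['income', 'price']
cols = list(tz.unique(tz.concatv(filter_cols, formula_cols)))
# -> ['income', 'tenure', 'price']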
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.fit_from_cfg
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None): """ Parameters ---------- choosers : DataFrame A dataframe in which rows represent choosers. chosen_fname : string A string indicating the column in the choosers dataframe which gives which alternatives the choosers have chosen. alternatives : DataFrame A table of alternatives. It should include the choices from the choosers table as well as other alternatives from which to sample. Values in choosers[chosen_fname] should index into the alternatives dataframe. cfgname : string The name of the yaml config file from which to read the discrete choice model. outcfgname : string, optional (default cfgname) The name of the output yaml config file where estimation results are written into. Returns ------- lcm : MNLDiscreteChoiceModel which was used to fit """ logger.debug('start: fit from configuration {}'.format(cfgname)) lcm = cls.from_yaml(str_or_buffer=cfgname) lcm.fit(choosers, alternatives, choosers[chosen_fname]) lcm.report_fit() outcfgname = outcfgname or cfgname lcm.to_yaml(str_or_buffer=outcfgname) logger.debug('finish: fit into configuration {}'.format(outcfgname)) return lcm
python
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None): logger.debug('start: fit from configuration {}'.format(cfgname)) lcm = cls.from_yaml(str_or_buffer=cfgname) lcm.fit(choosers, alternatives, choosers[chosen_fname]) lcm.report_fit() outcfgname = outcfgname or cfgname lcm.to_yaml(str_or_buffer=outcfgname) logger.debug('finish: fit into configuration {}'.format(outcfgname)) return lcm
[ "def", "fit_from_cfg", "(", "cls", ",", "choosers", ",", "chosen_fname", ",", "alternatives", ",", "cfgname", ",", "outcfgname", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: fit from configuration {}'", ".", "format", "(", "cfgname", ")", ")", "lcm", "=", "cls", ".", "from_yaml", "(", "str_or_buffer", "=", "cfgname", ")", "lcm", ".", "fit", "(", "choosers", ",", "alternatives", ",", "choosers", "[", "chosen_fname", "]", ")", "lcm", ".", "report_fit", "(", ")", "outcfgname", "=", "outcfgname", "or", "cfgname", "lcm", ".", "to_yaml", "(", "str_or_buffer", "=", "outcfgname", ")", "logger", ".", "debug", "(", "'finish: fit into configuration {}'", ".", "format", "(", "outcfgname", ")", ")", "return", "lcm" ]
Parameters ---------- choosers : DataFrame A dataframe in which rows represent choosers. chosen_fname : string A string indicating the column in the choosers dataframe which gives which alternatives the choosers have chosen. alternatives : DataFrame A table of alternatives. It should include the choices from the choosers table as well as other alternatives from which to sample. Values in choosers[chosen_fname] should index into the alternatives dataframe. cfgname : string The name of the yaml config file from which to read the discrete choice model. outcfgname : string, optional (default cfgname) The name of the output yaml config file where estimation results are written into. Returns ------- lcm : MNLDiscreteChoiceModel which was used to fit
[ "Parameters", "----------", "choosers", ":", "DataFrame", "A", "dataframe", "in", "which", "rows", "represent", "choosers", ".", "chosen_fname", ":", "string", "A", "string", "indicating", "the", "column", "in", "the", "choosers", "dataframe", "which", "gives", "which", "alternatives", "the", "choosers", "have", "chosen", ".", "alternatives", ":", "DataFrame", "A", "table", "of", "alternatives", ".", "It", "should", "include", "the", "choices", "from", "the", "choosers", "table", "as", "well", "as", "other", "alternatives", "from", "which", "to", "sample", ".", "Values", "in", "choosers", "[", "chosen_fname", "]", "should", "index", "into", "the", "alternatives", "dataframe", ".", "cfgname", ":", "string", "The", "name", "of", "the", "yaml", "config", "file", "from", "which", "to", "read", "the", "discrete", "choice", "model", ".", "outcfgname", ":", "string", "optional", "(", "default", "cfgname", ")", "The", "name", "of", "the", "output", "yaml", "config", "file", "where", "estimation", "results", "are", "written", "into", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L753-L784
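A hedged end-to-end sketch of fit_from_cfg: write a config to disk with to_yaml, then estimate from it. The file names, tables, and 'price' expression are invented for illustration.

import pandas as pd
from urbansim.models.dcm import MNLDiscreteChoiceModel

# Persist a toy spec first so there is a config file to fit from.
MNLDiscreteChoiceModel('price', sample_size=4, name='toy_lcm').to_yaml('toy.yaml')

households = pd.DataFrame({'building_id': [10, 11, 12]}, index=[1, 2, 3])
buildings = pd.DataFrame({'price': [10.0, 20.0, 30.0, 40.0]},
                         index=[10, 11, 12, 13])

lcm = MNLDiscreteChoiceModel.fit_from_cfg(
    households, 'building_id', buildings, 'toy.yaml',
    outcfgname='toy_fitted.yaml')  # estimation results written to the out file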
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.predict_from_cfg
def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None, alternative_ratio=2.0, debug=False): """ Simulate choices for the specified choosers Parameters ---------- choosers : DataFrame A dataframe of agents doing the choosing. alternatives : DataFrame A dataframe of locations which the choosers are locating in and which have a supply. cfgname : string The name of the yaml config file from which to read the discrete choice model. cfg: string an ordered yaml string of the model discrete choice model configuration. Used to read config from memory in lieu of loading cfgname from disk. alternative_ratio : float, optional Above the ratio of alternatives to choosers (default of 2.0), the alternatives will be sampled to meet this ratio (for performance reasons). debug : boolean, optional (default False) Whether to generate debug information on the model. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. lcm : MNLDiscreteChoiceModel which was used to predict """ logger.debug('start: predict from configuration {}'.format(cfgname)) if cfgname: lcm = cls.from_yaml(str_or_buffer=cfgname) elif cfg: lcm = cls.from_yaml(yaml_str=cfg) else: msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments' logger.error(msg) raise ValueError(msg) if len(alternatives) > len(choosers) * alternative_ratio: logger.info( ("Alternative ratio exceeded: %d alternatives " "and only %d choosers") % (len(alternatives), len(choosers))) idxes = np.random.choice( alternatives.index, size=int(len(choosers) * alternative_ratio), replace=False) alternatives = alternatives.loc[idxes] logger.info( " after sampling %d alternatives are available\n" % len(alternatives)) new_units = lcm.predict(choosers, alternatives, debug=debug) print("Assigned %d choosers to new units" % len(new_units.dropna())) logger.debug('finish: predict from configuration {}'.format(cfgname)) return new_units, lcm
python
def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None, alternative_ratio=2.0, debug=False): logger.debug('start: predict from configuration {}'.format(cfgname)) if cfgname: lcm = cls.from_yaml(str_or_buffer=cfgname) elif cfg: lcm = cls.from_yaml(yaml_str=cfg) else: msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments' logger.error(msg) raise ValueError(msg) if len(alternatives) > len(choosers) * alternative_ratio: logger.info( ("Alternative ratio exceeded: %d alternatives " "and only %d choosers") % (len(alternatives), len(choosers))) idxes = np.random.choice( alternatives.index, size=int(len(choosers) * alternative_ratio), replace=False) alternatives = alternatives.loc[idxes] logger.info( " after sampling %d alternatives are available\n" % len(alternatives)) new_units = lcm.predict(choosers, alternatives, debug=debug) print("Assigned %d choosers to new units" % len(new_units.dropna())) logger.debug('finish: predict from configuration {}'.format(cfgname)) return new_units, lcm
[ "def", "predict_from_cfg", "(", "cls", ",", "choosers", ",", "alternatives", ",", "cfgname", "=", "None", ",", "cfg", "=", "None", ",", "alternative_ratio", "=", "2.0", ",", "debug", "=", "False", ")", ":", "logger", ".", "debug", "(", "'start: predict from configuration {}'", ".", "format", "(", "cfgname", ")", ")", "if", "cfgname", ":", "lcm", "=", "cls", ".", "from_yaml", "(", "str_or_buffer", "=", "cfgname", ")", "elif", "cfg", ":", "lcm", "=", "cls", ".", "from_yaml", "(", "yaml_str", "=", "cfg", ")", "else", ":", "msg", "=", "'predict_from_cfg requires a configuration via the cfgname or cfg arguments'", "logger", ".", "error", "(", "msg", ")", "raise", "ValueError", "(", "msg", ")", "if", "len", "(", "alternatives", ")", ">", "len", "(", "choosers", ")", "*", "alternative_ratio", ":", "logger", ".", "info", "(", "(", "\"Alternative ratio exceeded: %d alternatives \"", "\"and only %d choosers\"", ")", "%", "(", "len", "(", "alternatives", ")", ",", "len", "(", "choosers", ")", ")", ")", "idxes", "=", "np", ".", "random", ".", "choice", "(", "alternatives", ".", "index", ",", "size", "=", "int", "(", "len", "(", "choosers", ")", "*", "alternative_ratio", ")", ",", "replace", "=", "False", ")", "alternatives", "=", "alternatives", ".", "loc", "[", "idxes", "]", "logger", ".", "info", "(", "\" after sampling %d alternatives are available\\n\"", "%", "len", "(", "alternatives", ")", ")", "new_units", "=", "lcm", ".", "predict", "(", "choosers", ",", "alternatives", ",", "debug", "=", "debug", ")", "print", "(", "\"Assigned %d choosers to new units\"", "%", "len", "(", "new_units", ".", "dropna", "(", ")", ")", ")", "logger", ".", "debug", "(", "'finish: predict from configuration {}'", ".", "format", "(", "cfgname", ")", ")", "return", "new_units", ",", "lcm" ]
Simulate choices for the specified choosers Parameters ---------- choosers : DataFrame A dataframe of agents doing the choosing. alternatives : DataFrame A dataframe of locations which the choosers are locating in and which have a supply. cfgname : string The name of the yaml config file from which to read the discrete choice model. cfg : string An ordered yaml string of the discrete choice model configuration. Used to read config from memory in lieu of loading cfgname from disk. alternative_ratio : float, optional Above the ratio of alternatives to choosers (default of 2.0), the alternatives will be sampled to meet this ratio (for performance reasons). debug : boolean, optional (default False) Whether to generate debug information on the model. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. lcm : MNLDiscreteChoiceModel which was used to predict
[ "Simulate", "choices", "for", "the", "specified", "choosers" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L787-L847
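Continuing the fit_from_cfg sketch above (same hypothetical tables and 'toy_fitted.yaml'), predict_from_cfg reloads the fitted config and simulates choices; alternative_ratio triggers the downsampling described in the record.

from urbansim.models.dcm import MNLDiscreteChoiceModel

choices, lcm = MNLDiscreteChoiceModel.predict_from_cfg(
    households, buildings, cfgname='toy_fitted.yaml', alternative_ratio=2.0)
# choices maps chooser ID -> alternative ID; lcm is the loaded model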
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.add_model_from_params
def add_model_from_params( self, name, model_expression, sample_size, probability_mode='full_product', choice_mode='individual', choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=None, estimation_sample_size=None, prediction_sample_size=None, choice_column=None): """ Add a model by passing parameters through to MNLDiscreteChoiceModel. Parameters ---------- name Must match a segment in the choosers table. model_expression : str, iterable, or dict A patsy model expression. Should contain only a right-hand side. sample_size : int Number of choices to sample for estimating the model. probability_mode : str, optional Specify the method to use for calculating probabilities during prediction. Available string options are 'single_chooser' and 'full_product'. In "single chooser" mode one agent is chosen for calculating probabilities across all alternatives. In "full product" mode probabilities are calculated for every chooser across all alternatives. choice_mode : str or callable, optional Specify the method to use for making choices among alternatives. Available string options are 'individual' and 'aggregate'. In "individual" mode choices will be made separately for each chooser. In "aggregate" mode choices are made for all choosers at once. Aggregate mode implies that an alternative chosen by one agent is unavailable to other agents and that the same probabilities can be used for all choosers. choosers_fit_filters : list of str, optional Filters applied to choosers table before fitting the model. choosers_predict_filters : list of str, optional Filters applied to the choosers table before calculating new data points. alts_fit_filters : list of str, optional Filters applied to the alternatives table before fitting the model. alts_predict_filters : list of str, optional Filters applied to the alternatives table before calculating new data points. interaction_predict_filters : list of str, optional Filters applied to the merged choosers/alternatives table before predicting agent choices. estimation_sample_size : int, optional Whether to sample choosers during estimation (needs to be applied after choosers_fit_filters) prediction_sample_size : int, optional Whether (and how much) to sample alternatives during prediction. Note that this can lead to multiple choosers picking the same alternative. choice_column : optional Name of the column in the `alternatives` table that choosers should choose. e.g. the 'building_id' column. If not provided the alternatives index is used. """ logger.debug('adding model {} to LCM group {}'.format(name, self.name)) self.models[name] = MNLDiscreteChoiceModel( model_expression, sample_size, probability_mode, choice_mode, choosers_fit_filters, choosers_predict_filters, alts_fit_filters, alts_predict_filters, interaction_predict_filters, estimation_sample_size, prediction_sample_size, choice_column, name)
python
def add_model_from_params( self, name, model_expression, sample_size, probability_mode='full_product', choice_mode='individual', choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=None, estimation_sample_size=None, prediction_sample_size=None, choice_column=None): logger.debug('adding model {} to LCM group {}'.format(name, self.name)) self.models[name] = MNLDiscreteChoiceModel( model_expression, sample_size, probability_mode, choice_mode, choosers_fit_filters, choosers_predict_filters, alts_fit_filters, alts_predict_filters, interaction_predict_filters, estimation_sample_size, prediction_sample_size, choice_column, name)
[ "def", "add_model_from_params", "(", "self", ",", "name", ",", "model_expression", ",", "sample_size", ",", "probability_mode", "=", "'full_product'", ",", "choice_mode", "=", "'individual'", ",", "choosers_fit_filters", "=", "None", ",", "choosers_predict_filters", "=", "None", ",", "alts_fit_filters", "=", "None", ",", "alts_predict_filters", "=", "None", ",", "interaction_predict_filters", "=", "None", ",", "estimation_sample_size", "=", "None", ",", "prediction_sample_size", "=", "None", ",", "choice_column", "=", "None", ")", ":", "logger", ".", "debug", "(", "'adding model {} to LCM group {}'", ".", "format", "(", "name", ",", "self", ".", "name", ")", ")", "self", ".", "models", "[", "name", "]", "=", "MNLDiscreteChoiceModel", "(", "model_expression", ",", "sample_size", ",", "probability_mode", ",", "choice_mode", ",", "choosers_fit_filters", ",", "choosers_predict_filters", ",", "alts_fit_filters", ",", "alts_predict_filters", ",", "interaction_predict_filters", ",", "estimation_sample_size", ",", "prediction_sample_size", ",", "choice_column", ",", "name", ")" ]
Add a model by passing parameters through to MNLDiscreteChoiceModel. Parameters ---------- name Must match a segment in the choosers table. model_expression : str, iterable, or dict A patsy model expression. Should contain only a right-hand side. sample_size : int Number of choices to sample for estimating the model. probability_mode : str, optional Specify the method to use for calculating probabilities during prediction. Available string options are 'single_chooser' and 'full_product'. In "single chooser" mode one agent is chosen for calculating probabilities across all alternatives. In "full product" mode probabilities are calculated for every chooser across all alternatives. choice_mode : str or callable, optional Specify the method to use for making choices among alternatives. Available string options are 'individual' and 'aggregate'. In "individual" mode choices will be made separately for each chooser. In "aggregate" mode choices are made for all choosers at once. Aggregate mode implies that an alternative chosen by one agent is unavailable to other agents and that the same probabilities can be used for all choosers. choosers_fit_filters : list of str, optional Filters applied to choosers table before fitting the model. choosers_predict_filters : list of str, optional Filters applied to the choosers table before calculating new data points. alts_fit_filters : list of str, optional Filters applied to the alternatives table before fitting the model. alts_predict_filters : list of str, optional Filters applied to the alternatives table before calculating new data points. interaction_predict_filters : list of str, optional Filters applied to the merged choosers/alternatives table before predicting agent choices. estimation_sample_size : int, optional Whether to sample choosers during estimation (needs to be applied after choosers_fit_filters) prediction_sample_size : int, optional Whether (and how much) to sample alternatives during prediction. Note that this can lead to multiple choosers picking the same alternative. choice_column : optional Name of the column in the `alternatives` table that choosers should choose. e.g. the 'building_id' column. If not provided the alternatives index is used.
[ "Add", "a", "model", "by", "passing", "parameters", "through", "to", "MNLDiscreteChoiceModel", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L893-L960
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup._iter_groups
def _iter_groups(self, data): """ Iterate over the groups in `data` after grouping by `segmentation_col`. Skips any groups for which there is no model stored. Yields tuples of (name, df) where name is the group key and df is the group DataFrame. Parameters ---------- data : pandas.DataFrame Must have a column with the same name as `segmentation_col`. """ groups = data.groupby(self.segmentation_col) for name, group in groups: if name not in self.models: continue logger.debug( 'returning group {} in LCM group {}'.format(name, self.name)) yield name, group
python
def _iter_groups(self, data): groups = data.groupby(self.segmentation_col) for name, group in groups: if name not in self.models: continue logger.debug( 'returning group {} in LCM group {}'.format(name, self.name)) yield name, group
[ "def", "_iter_groups", "(", "self", ",", "data", ")", ":", "groups", "=", "data", ".", "groupby", "(", "self", ".", "segmentation_col", ")", "for", "name", ",", "group", "in", "groups", ":", "if", "name", "not", "in", "self", ".", "models", ":", "continue", "logger", ".", "debug", "(", "'returning group {} in LCM group {}'", ".", "format", "(", "name", ",", "self", ".", "name", ")", ")", "yield", "name", ",", "group" ]
Iterate over the groups in `data` after grouping by `segmentation_col`. Skips any groups for which there is no model stored. Yields tuples of (name, df) where name is the group key and df is the group DataFrame. Parameters ---------- data : pandas.DataFrame Must have a column with the same name as `segmentation_col`.
[ "Iterate", "over", "the", "groups", "in", "data", "after", "grouping", "by", "segmentation_col", ".", "Skips", "any", "groups", "for", "which", "there", "is", "no", "model", "stored", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L962-L984
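_iter_groups above is a thin wrapper over pandas groupby that silently skips segments with no registered model. A standalone sketch of that pattern with placeholder data:

import pandas as pd

choosers = pd.DataFrame({'tenure': ['own', 'rent', 'own']}, index=[1, 2, 3])
models = {'own': 'model placeholder'}  # nothing registered for 'rent'

for name, group in choosers.groupby('tenure'):
    if name not in models:
        continue  # unmodeled segments are skipped, as in _iter_groups
    print(name, len(group))  # prints: own 2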
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.apply_fit_filters
def apply_fit_filters(self, choosers, alternatives): """ Filter `choosers` and `alternatives` for fitting. This is done by filtering each submodel and concatenating the results. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame """ ch = [] alts = [] for name, df in self._iter_groups(choosers): filtered_choosers, filtered_alts = \ self.models[name].apply_fit_filters(df, alternatives) ch.append(filtered_choosers) alts.append(filtered_alts) return pd.concat(ch), pd.concat(alts)
python
def apply_fit_filters(self, choosers, alternatives): ch = [] alts = [] for name, df in self._iter_groups(choosers): filtered_choosers, filtered_alts = \ self.models[name].apply_fit_filters(df, alternatives) ch.append(filtered_choosers) alts.append(filtered_alts) return pd.concat(ch), pd.concat(alts)
[ "def", "apply_fit_filters", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "ch", "=", "[", "]", "alts", "=", "[", "]", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "filtered_choosers", ",", "filtered_alts", "=", "self", ".", "models", "[", "name", "]", ".", "apply_fit_filters", "(", "df", ",", "alternatives", ")", "ch", ".", "append", "(", "filtered_choosers", ")", "alts", ".", "append", "(", "filtered_alts", ")", "return", "pd", ".", "concat", "(", "ch", ")", ",", "pd", ".", "concat", "(", "alts", ")" ]
Filter `choosers` and `alternatives` for fitting. This is done by filtering each submodel and concatenating the results. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame
[ "Filter", "choosers", "and", "alternatives", "for", "fitting", ".", "This", "is", "done", "by", "filtering", "each", "submodel", "and", "concatenating", "the", "results", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L986-L1014
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.fit
def fit(self, choosers, alternatives, current_choice): """ Fit and save models based on given data after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-likelihood values as returned by MNLDiscreteChoiceModel.fit. """ with log_start_finish( 'fit models in LCM group {}'.format(self.name), logger): return { name: self.models[name].fit(df, alternatives, current_choice) for name, df in self._iter_groups(choosers)}
python
def fit(self, choosers, alternatives, current_choice): with log_start_finish( 'fit models in LCM group {}'.format(self.name), logger): return { name: self.models[name].fit(df, alternatives, current_choice) for name, df in self._iter_groups(choosers)}
[ "def", "fit", "(", "self", ",", "choosers", ",", "alternatives", ",", "current_choice", ")", ":", "with", "log_start_finish", "(", "'fit models in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ",", "logger", ")", ":", "return", "{", "name", ":", "self", ".", "models", "[", "name", "]", ".", "fit", "(", "df", ",", "alternatives", ",", "current_choice", ")", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", "}" ]
Fit and save models based on given data after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column with the same name as the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. current_choice Name of column in `choosers` that indicates which alternative they have currently chosen. Returns ------- log_likelihoods : dict of dict Keys will be model names and values will be dictionaries of log-likelihood values as returned by MNLDiscreteChoiceModel.fit.
[ "Fit", "and", "save", "models", "based", "on", "given", "data", "after", "segmenting", "the", "choosers", "table", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1049-L1078
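A hedged sketch combining the add_model_from_params and fit records above. The MNLDiscreteChoiceModelGroup constructor argument is assumed from the segmentation_col attribute this class uses; tables and expressions are toys.

import pandas as pd
from urbansim.models.dcm import MNLDiscreteChoiceModelGroup

grp = MNLDiscreteChoiceModelGroup('tenure')  # constructor signature assumed
grp.add_model_from_params('own', 'price', sample_size=3)
grp.add_model_from_params('rent', 'price', sample_size=3)

households = pd.DataFrame(
    {'tenure': ['own', 'rent', 'own', 'rent'],
     'building_id': [10, 11, 12, 10]},
    index=[1, 2, 3, 4])
buildings = pd.DataFrame({'price': [10.0, 20.0, 30.0]}, index=[10, 11, 12])

log_likes = grp.fit(households, buildings, current_choice='building_id')
# dict keyed by segment name, e.g. {'own': {...}, 'rent': {...}}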
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.fitted
def fitted(self): """ Whether all models in the group have been fitted. """ return (all(m.fitted for m in self.models.values()) if self.models else False)
python
def fitted(self): return (all(m.fitted for m in self.models.values()) if self.models else False)
[ "def", "fitted", "(", "self", ")", ":", "return", "(", "all", "(", "m", ".", "fitted", "for", "m", "in", "self", ".", "models", ".", "values", "(", ")", ")", "if", "self", ".", "models", "else", "False", ")" ]
Whether all models in the group have been fitted.
[ "Whether", "all", "models", "in", "the", "group", "have", "been", "fitted", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1081-L1087
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.probabilities
def probabilities(self, choosers, alternatives): """ Returns alternative probabilities for each chooser segment as a dictionary keyed by segment name. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probabilities : dict of pandas.Series """ logger.debug( 'start: calculate probabilities in LCM group {}'.format(self.name)) probs = {} for name, df in self._iter_groups(choosers): probs[name] = self.models[name].probabilities(df, alternatives) logger.debug( 'finish: calculate probabilities in LCM group {}'.format( self.name)) return probs
python
def probabilities(self, choosers, alternatives): logger.debug( 'start: calculate probabilities in LCM group {}'.format(self.name)) probs = {} for name, df in self._iter_groups(choosers): probs[name] = self.models[name].probabilities(df, alternatives) logger.debug( 'finish: calculate probabilities in LCM group {}'.format( self.name)) return probs
[ "def", "probabilities", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "logger", ".", "debug", "(", "'start: calculate probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "probs", "=", "{", "}", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "probs", "[", "name", "]", "=", "self", ".", "models", "[", "name", "]", ".", "probabilities", "(", "df", ",", "alternatives", ")", "logger", ".", "debug", "(", "'finish: calculate probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "probs" ]
Returns alternative probabilities for each chooser segment as a dictionary keyed by segment name. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probabilities : dict of pandas.Series
[ "Returns", "alternative", "probabilties", "for", "each", "chooser", "segment", "as", "a", "dictionary", "keyed", "by", "segment", "name", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1089-L1117
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.summed_probabilities
def summed_probabilities(self, choosers, alternatives): """ Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together. """ if len(alternatives) == 0 or len(choosers) == 0: return pd.Series() logger.debug( 'start: calculate summed probabilities in LCM group {}'.format( self.name)) probs = [] for name, df in self._iter_groups(choosers): probs.append( self.models[name].summed_probabilities(df, alternatives)) add = tz.curry(pd.Series.add, fill_value=0) probs = tz.reduce(add, probs) logger.debug( 'finish: calculate summed probabilities in LCM group {}'.format( self.name)) return probs
python
def summed_probabilities(self, choosers, alternatives): if len(alternatives) == 0 or len(choosers) == 0: return pd.Series() logger.debug( 'start: calculate summed probabilities in LCM group {}'.format( self.name)) probs = [] for name, df in self._iter_groups(choosers): probs.append( self.models[name].summed_probabilities(df, alternatives)) add = tz.curry(pd.Series.add, fill_value=0) probs = tz.reduce(add, probs) logger.debug( 'finish: calculate summed probabilities in LCM group {}'.format( self.name)) return probs
[ "def", "summed_probabilities", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "if", "len", "(", "alternatives", ")", "==", "0", "or", "len", "(", "choosers", ")", "==", "0", ":", "return", "pd", ".", "Series", "(", ")", "logger", ".", "debug", "(", "'start: calculate summed probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "probs", "=", "[", "]", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "probs", ".", "append", "(", "self", ".", "models", "[", "name", "]", ".", "summed_probabilities", "(", "df", ",", "alternatives", ")", ")", "add", "=", "tz", ".", "curry", "(", "pd", ".", "Series", ".", "add", ",", "fill_value", "=", "0", ")", "probs", "=", "tz", ".", "reduce", "(", "add", ",", "probs", ")", "logger", ".", "debug", "(", "'finish: calculate summed probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "probs" ]
Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together.
[ "Returns", "the", "sum", "of", "probabilities", "for", "alternatives", "across", "all", "chooser", "segments", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1119-L1156
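The heart of the group-level summed_probabilities above is folding per-segment Series together with pandas' add and fill_value=0, so an alternative missing from one segment still accumulates from the others. A standalone toy:

import pandas as pd
import toolz as tz

seg_a = pd.Series({'alt1': 0.7, 'alt2': 1.3})
seg_b = pd.Series({'alt2': 0.4, 'alt3': 0.6})

add = tz.curry(pd.Series.add, fill_value=0)
total = tz.reduce(add, [seg_a, seg_b])
# alt1 0.7, alt2 1.7, alt3 0.6 -- no NaNs because of fill_value=0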
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.predict
def predict(self, choosers, alternatives, debug=False): """ Choose from among alternatives for a group of agents after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. """ logger.debug('start: predict models in LCM group {}'.format(self.name)) results = [] for name, df in self._iter_groups(choosers): choices = self.models[name].predict(df, alternatives, debug=debug) if self.remove_alts and len(alternatives) > 0: alternatives = alternatives.loc[ ~alternatives.index.isin(choices)] results.append(choices) logger.debug( 'finish: predict models in LCM group {}'.format(self.name)) return pd.concat(results) if results else pd.Series()
python
def predict(self, choosers, alternatives, debug=False): logger.debug('start: predict models in LCM group {}'.format(self.name)) results = [] for name, df in self._iter_groups(choosers): choices = self.models[name].predict(df, alternatives, debug=debug) if self.remove_alts and len(alternatives) > 0: alternatives = alternatives.loc[ ~alternatives.index.isin(choices)] results.append(choices) logger.debug( 'finish: predict models in LCM group {}'.format(self.name)) return pd.concat(results) if results else pd.Series()
[ "def", "predict", "(", "self", ",", "choosers", ",", "alternatives", ",", "debug", "=", "False", ")", ":", "logger", ".", "debug", "(", "'start: predict models in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "results", "=", "[", "]", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "choices", "=", "self", ".", "models", "[", "name", "]", ".", "predict", "(", "df", ",", "alternatives", ",", "debug", "=", "debug", ")", "if", "self", ".", "remove_alts", "and", "len", "(", "alternatives", ")", ">", "0", ":", "alternatives", "=", "alternatives", ".", "loc", "[", "~", "alternatives", ".", "index", ".", "isin", "(", "choices", ")", "]", "results", ".", "append", "(", "choices", ")", "logger", ".", "debug", "(", "'finish: predict models in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "pd", ".", "concat", "(", "results", ")", "if", "results", "else", "pd", ".", "Series", "(", ")" ]
Choose from among alternatives for a group of agents after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers.
[ "Choose", "from", "among", "alternatives", "for", "a", "group", "of", "agents", "after", "segmenting", "the", "choosers", "table", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1158-L1195
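The remove_alts behavior in the group predict record above comes down to one pandas filter: alternatives picked by an earlier segment are dropped before the next segment chooses. Toy values:

import pandas as pd

alternatives = pd.DataFrame({'price': [1.0, 2.0, 3.0]}, index=[10, 11, 12])
choices = pd.Series([11])  # taken by an earlier segment
remaining = alternatives.loc[~alternatives.index.isin(choices)]
# rows 10 and 12 remain available for the next segment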
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModelGroup.alts_columns_used
def alts_columns_used(self): """ Columns from the alternatives table that are used for filtering. """ return list(tz.unique(tz.concat( m.alts_columns_used() for m in self.models.values())))
python
def alts_columns_used(self): return list(tz.unique(tz.concat( m.alts_columns_used() for m in self.models.values())))
[ "def", "alts_columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concat", "(", "m", ".", "alts_columns_used", "(", ")", "for", "m", "in", "self", ".", "models", ".", "values", "(", ")", ")", ")", ")" ]
Columns from the alternatives table that are used for filtering.
[ "Columns", "from", "the", "alternatives", "table", "that", "are", "used", "for", "filtering", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1205-L1211
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.from_yaml
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- SegmentedMNLDiscreteChoiceModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) default_model_expr = cfg['default_config']['model_expression'] seg = cls( cfg['segmentation_col'], cfg['sample_size'], cfg['probability_mode'], cfg['choice_mode'], cfg['choosers_fit_filters'], cfg['choosers_predict_filters'], cfg['alts_fit_filters'], cfg['alts_predict_filters'], cfg['interaction_predict_filters'], cfg['estimation_sample_size'], cfg['prediction_sample_size'], cfg['choice_column'], default_model_expr, cfg['remove_alts'], cfg['name']) if "models" not in cfg: cfg["models"] = {} for name, m in cfg['models'].items(): m['model_expression'] = m.get( 'model_expression', default_model_expr) m['sample_size'] = cfg['sample_size'] m['probability_mode'] = cfg['probability_mode'] m['choice_mode'] = cfg['choice_mode'] m['choosers_fit_filters'] = None m['choosers_predict_filters'] = None m['alts_fit_filters'] = None m['alts_predict_filters'] = None m['interaction_predict_filters'] = \ cfg['interaction_predict_filters'] m['estimation_sample_size'] = cfg['estimation_sample_size'] m['prediction_sample_size'] = cfg['prediction_sample_size'] m['choice_column'] = cfg['choice_column'] model = MNLDiscreteChoiceModel.from_yaml( yamlio.convert_to_yaml(m, None)) seg._group.add_model(model) logger.debug( 'loaded segmented LCM model {} from YAML'.format(seg.name)) return seg
python
def from_yaml(cls, yaml_str=None, str_or_buffer=None): cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) default_model_expr = cfg['default_config']['model_expression'] seg = cls( cfg['segmentation_col'], cfg['sample_size'], cfg['probability_mode'], cfg['choice_mode'], cfg['choosers_fit_filters'], cfg['choosers_predict_filters'], cfg['alts_fit_filters'], cfg['alts_predict_filters'], cfg['interaction_predict_filters'], cfg['estimation_sample_size'], cfg['prediction_sample_size'], cfg['choice_column'], default_model_expr, cfg['remove_alts'], cfg['name']) if "models" not in cfg: cfg["models"] = {} for name, m in cfg['models'].items(): m['model_expression'] = m.get( 'model_expression', default_model_expr) m['sample_size'] = cfg['sample_size'] m['probability_mode'] = cfg['probability_mode'] m['choice_mode'] = cfg['choice_mode'] m['choosers_fit_filters'] = None m['choosers_predict_filters'] = None m['alts_fit_filters'] = None m['alts_predict_filters'] = None m['interaction_predict_filters'] = \ cfg['interaction_predict_filters'] m['estimation_sample_size'] = cfg['estimation_sample_size'] m['prediction_sample_size'] = cfg['prediction_sample_size'] m['choice_column'] = cfg['choice_column'] model = MNLDiscreteChoiceModel.from_yaml( yamlio.convert_to_yaml(m, None)) seg._group.add_model(model) logger.debug( 'loaded segmented LCM model {} from YAML'.format(seg.name)) return seg
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ")", ":", "cfg", "=", "yamlio", ".", "yaml_to_dict", "(", "yaml_str", ",", "str_or_buffer", ")", "default_model_expr", "=", "cfg", "[", "'default_config'", "]", "[", "'model_expression'", "]", "seg", "=", "cls", "(", "cfg", "[", "'segmentation_col'", "]", ",", "cfg", "[", "'sample_size'", "]", ",", "cfg", "[", "'probability_mode'", "]", ",", "cfg", "[", "'choice_mode'", "]", ",", "cfg", "[", "'choosers_fit_filters'", "]", ",", "cfg", "[", "'choosers_predict_filters'", "]", ",", "cfg", "[", "'alts_fit_filters'", "]", ",", "cfg", "[", "'alts_predict_filters'", "]", ",", "cfg", "[", "'interaction_predict_filters'", "]", ",", "cfg", "[", "'estimation_sample_size'", "]", ",", "cfg", "[", "'prediction_sample_size'", "]", ",", "cfg", "[", "'choice_column'", "]", ",", "default_model_expr", ",", "cfg", "[", "'remove_alts'", "]", ",", "cfg", "[", "'name'", "]", ")", "if", "\"models\"", "not", "in", "cfg", ":", "cfg", "[", "\"models\"", "]", "=", "{", "}", "for", "name", ",", "m", "in", "cfg", "[", "'models'", "]", ".", "items", "(", ")", ":", "m", "[", "'model_expression'", "]", "=", "m", ".", "get", "(", "'model_expression'", ",", "default_model_expr", ")", "m", "[", "'sample_size'", "]", "=", "cfg", "[", "'sample_size'", "]", "m", "[", "'probability_mode'", "]", "=", "cfg", "[", "'probability_mode'", "]", "m", "[", "'choice_mode'", "]", "=", "cfg", "[", "'choice_mode'", "]", "m", "[", "'choosers_fit_filters'", "]", "=", "None", "m", "[", "'choosers_predict_filters'", "]", "=", "None", "m", "[", "'alts_fit_filters'", "]", "=", "None", "m", "[", "'alts_predict_filters'", "]", "=", "None", "m", "[", "'interaction_predict_filters'", "]", "=", "cfg", "[", "'interaction_predict_filters'", "]", "m", "[", "'estimation_sample_size'", "]", "=", "cfg", "[", "'estimation_sample_size'", "]", "m", "[", "'prediction_sample_size'", "]", "=", "cfg", "[", "'prediction_sample_size'", "]", "m", "[", "'choice_column'", "]", "=", "cfg", "[", "'choice_column'", "]", "model", "=", "MNLDiscreteChoiceModel", ".", "from_yaml", "(", "yamlio", ".", "convert_to_yaml", "(", "m", ",", "None", ")", ")", "seg", ".", "_group", ".", "add_model", "(", "model", ")", "logger", ".", "debug", "(", "'loaded segmented LCM model {} from YAML'", ".", "format", "(", "seg", ".", "name", ")", ")", "return", "seg" ]
Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- SegmentedMNLDiscreteChoiceModel
[ "Create", "a", "SegmentedMNLDiscreteChoiceModel", "instance", "from", "a", "saved", "YAML", "configuration", ".", "Arguments", "are", "mutally", "exclusive", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1334-L1397
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.add_segment
def add_segment(self, name, model_expression=None): """ Add a new segment with its own model expression. Parameters ---------- name Segment name. Must match a segment in the groupby of the data. model_expression : str or dict, optional A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. If not given the default model will be used, which must not be None. """ logger.debug('adding LCM model {} to segmented model {}'.format( name, self.name)) if not model_expression: if not self.default_model_expr: raise ValueError( 'No default model available, ' 'you must supply a model expression.') model_expression = self.default_model_expr # we'll take care of some of the filtering this side before # segmentation self._group.add_model_from_params( name=name, model_expression=model_expression, sample_size=self.sample_size, probability_mode=self.probability_mode, choice_mode=self.choice_mode, choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=self.interaction_predict_filters, estimation_sample_size=self.estimation_sample_size, choice_column=self.choice_column)
python
def add_segment(self, name, model_expression=None): logger.debug('adding LCM model {} to segmented model {}'.format( name, self.name)) if not model_expression: if not self.default_model_expr: raise ValueError( 'No default model available, ' 'you must supply a model expression.') model_expression = self.default_model_expr self._group.add_model_from_params( name=name, model_expression=model_expression, sample_size=self.sample_size, probability_mode=self.probability_mode, choice_mode=self.choice_mode, choosers_fit_filters=None, choosers_predict_filters=None, alts_fit_filters=None, alts_predict_filters=None, interaction_predict_filters=self.interaction_predict_filters, estimation_sample_size=self.estimation_sample_size, choice_column=self.choice_column)
[ "def", "add_segment", "(", "self", ",", "name", ",", "model_expression", "=", "None", ")", ":", "logger", ".", "debug", "(", "'adding LCM model {} to segmented model {}'", ".", "format", "(", "name", ",", "self", ".", "name", ")", ")", "if", "not", "model_expression", ":", "if", "not", "self", ".", "default_model_expr", ":", "raise", "ValueError", "(", "'No default model available, '", "'you must supply a model expression.'", ")", "model_expression", "=", "self", ".", "default_model_expr", "# we'll take care of some of the filtering this side before", "# segmentation", "self", ".", "_group", ".", "add_model_from_params", "(", "name", "=", "name", ",", "model_expression", "=", "model_expression", ",", "sample_size", "=", "self", ".", "sample_size", ",", "probability_mode", "=", "self", ".", "probability_mode", ",", "choice_mode", "=", "self", ".", "choice_mode", ",", "choosers_fit_filters", "=", "None", ",", "choosers_predict_filters", "=", "None", ",", "alts_fit_filters", "=", "None", ",", "alts_predict_filters", "=", "None", ",", "interaction_predict_filters", "=", "self", ".", "interaction_predict_filters", ",", "estimation_sample_size", "=", "self", ".", "estimation_sample_size", ",", "choice_column", "=", "self", ".", "choice_column", ")" ]
Add a new segment with its own model expression. Parameters ---------- name Segment name. Must match a segment in the groupby of the data. model_expression : str or dict, optional A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. If not given the default model will be used, which must not be None.
[ "Add", "a", "new", "segment", "with", "its", "own", "model", "expression", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1399-L1437
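A short sketch of registering segments; `seg` is assumed to be a `SegmentedMNLDiscreteChoiceModel`, and the segment names and patsy expression below are hypothetical values matching `seg.segmentation_col`.

```python
# Hypothetical segments keyed on the values of seg.segmentation_col.
seg.add_segment('renters', 'chosen ~ price_per_sqft + residential_units')
seg.add_segment('owners')  # no expression given: falls back to the default
                           # model expression, else a ValueError is raised
```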
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.apply_fit_filters
def apply_fit_filters(self, choosers, alternatives): """ Filter `choosers` and `alternatives` for fitting. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame """ return super(SegmentedMNLDiscreteChoiceModel, self).apply_fit_filters( choosers, alternatives)
python
def apply_fit_filters(self, choosers, alternatives): return super(SegmentedMNLDiscreteChoiceModel, self).apply_fit_filters( choosers, alternatives)
[ "def", "apply_fit_filters", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "return", "super", "(", "SegmentedMNLDiscreteChoiceModel", ",", "self", ")", ".", "apply_fit_filters", "(", "choosers", ",", "alternatives", ")" ]
Filter `choosers` and `alternatives` for fitting. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame
[ "Filter", "choosers", "and", "alternatives", "for", "fitting", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1439-L1457
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.apply_predict_filters
def apply_predict_filters(self, choosers, alternatives): """ Filter `choosers` and `alternatives` for prediction. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame """ return super( SegmentedMNLDiscreteChoiceModel, self ).apply_predict_filters(choosers, alternatives)
python
def apply_predict_filters(self, choosers, alternatives): return super( SegmentedMNLDiscreteChoiceModel, self ).apply_predict_filters(choosers, alternatives)
[ "def", "apply_predict_filters", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "return", "super", "(", "SegmentedMNLDiscreteChoiceModel", ",", "self", ")", ".", "apply_predict_filters", "(", "choosers", ",", "alternatives", ")" ]
Filter `choosers` and `alternatives` for prediction. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing, e.g. buildings. Returns ------- filtered_choosers, filtered_alts : pandas.DataFrame
[ "Filter", "choosers", "and", "alternatives", "for", "prediction", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1459-L1478
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.fit
def fit(self, choosers, alternatives, current_choice):
        """
        Fit and save models based on given data after segmenting
        the `choosers` table. Segments that have not already been explicitly
        added will be automatically added with default model.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
            Must have a column with the same name as the .segmentation_col
            attribute.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing,
            e.g. buildings.
        current_choice
            Name of column in `choosers` that indicates which alternative
            they have currently chosen.

        Returns
        -------
        log_likelihoods : dict of dict
            Keys will be model names and values will be dictionaries of
            log-likelihood values as returned by MNLDiscreteChoiceModel.fit.

        """
        logger.debug('start: fit models in segmented LCM {}'.format(self.name))

        choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
        unique = choosers[self.segmentation_col].unique()

        # Remove any existing segments that may no longer have counterparts
        # in the data. This can happen when loading a saved model and then
        # calling this method with data that no longer has segments that
        # were there the last time this was called.
        gone = set(self._group.models) - set(unique)
        for g in gone:
            del self._group.models[g]

        for x in unique:
            if x not in self._group.models:
                self.add_segment(x)

        results = self._group.fit(choosers, alternatives, current_choice)
        logger.debug(
            'finish: fit models in segmented LCM {}'.format(self.name))
        return results
python
def fit(self, choosers, alternatives, current_choice): logger.debug('start: fit models in segmented LCM {}'.format(self.name)) choosers, alternatives = self.apply_fit_filters(choosers, alternatives) unique = choosers[self.segmentation_col].unique() gone = set(self._group.models) - set(unique) for g in gone: del self._group.models[g] for x in unique: if x not in self._group.models: self.add_segment(x) results = self._group.fit(choosers, alternatives, current_choice) logger.debug( 'finish: fit models in segmented LCM {}'.format(self.name)) return results
[ "def", "fit", "(", "self", ",", "choosers", ",", "alternatives", ",", "current_choice", ")", ":", "logger", ".", "debug", "(", "'start: fit models in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "choosers", ",", "alternatives", "=", "self", ".", "apply_fit_filters", "(", "choosers", ",", "alternatives", ")", "unique", "=", "choosers", "[", "self", ".", "segmentation_col", "]", ".", "unique", "(", ")", "# Remove any existing segments that may no longer have counterparts", "# in the data. This can happen when loading a saved model and then", "# calling this method with data that no longer has segments that", "# were there the last time this was called.", "gone", "=", "set", "(", "self", ".", "_group", ".", "models", ")", "-", "set", "(", "unique", ")", "for", "g", "in", "gone", ":", "del", "self", ".", "_group", ".", "models", "[", "g", "]", "for", "x", "in", "unique", ":", "if", "x", "not", "in", "self", ".", "_group", ".", "models", ":", "self", ".", "add_segment", "(", "x", ")", "results", "=", "self", ".", "_group", ".", "fit", "(", "choosers", ",", "alternatives", ",", "current_choice", ")", "logger", ".", "debug", "(", "'finish: fit models in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "results" ]
Fit and save models based on given data after segmenting
        the `choosers` table. Segments that have not already been explicitly
        added will be automatically added with default model.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
            Must have a column with the same name as the .segmentation_col
            attribute.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing,
            e.g. buildings.
        current_choice
            Name of column in `choosers` that indicates which alternative
            they have currently chosen.

        Returns
        -------
        log_likelihoods : dict of dict
            Keys will be model names and values will be dictionaries of
            log-likelihood values as returned by MNLDiscreteChoiceModel.fit.
[ "Fit", "and", "save", "models", "based", "on", "given", "data", "after", "segmenting", "the", "choosers", "table", ".", "Segments", "that", "have", "not", "already", "been", "explicitly", "added", "will", "be", "automatically", "added", "with", "default", "model", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1480-L1526
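A hedged estimation sketch: `households` and `buildings` are assumed DataFrames, with `households` carrying both the segmentation column and a hypothetical `building_id` column recording each agent's current choice.

```python
# Sketch: one sub-model is fit per unique value of seg.segmentation_col;
# segments never seen before are added with the default expression.
results = seg.fit(households, buildings, current_choice='building_id')
for segment, loglik in results.items():
    print(segment, loglik)  # per-segment log-likelihood dictionaries
```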
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel._filter_choosers_alts
def _filter_choosers_alts(self, choosers, alternatives): """ Apply filters to the choosers and alts tables. """ return ( util.apply_filter_query( choosers, self.choosers_predict_filters), util.apply_filter_query( alternatives, self.alts_predict_filters))
python
def _filter_choosers_alts(self, choosers, alternatives): return ( util.apply_filter_query( choosers, self.choosers_predict_filters), util.apply_filter_query( alternatives, self.alts_predict_filters))
[ "def", "_filter_choosers_alts", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "return", "(", "util", ".", "apply_filter_query", "(", "choosers", ",", "self", ".", "choosers_predict_filters", ")", ",", "util", ".", "apply_filter_query", "(", "alternatives", ",", "self", ".", "alts_predict_filters", ")", ")" ]
Apply filters to the choosers and alts tables.
[ "Apply", "filters", "to", "the", "choosers", "and", "alts", "tables", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1536-L1545
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.probabilities
def probabilities(self, choosers, alternatives):
        """
        Returns alternative probabilities for each chooser segment as
        a dictionary keyed by segment name.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
            Must have a column matching the .segmentation_col attribute.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing.

        Returns
        -------
        probabilities : dict of pandas.Series

        """
        logger.debug(
            'start: calculate probabilities in segmented LCM {}'.format(
                self.name))
        choosers, alternatives = self.apply_predict_filters(
            choosers, alternatives)
        result = self._group.probabilities(choosers, alternatives)
        logger.debug(
            'finish: calculate probabilities in segmented LCM {}'.format(
                self.name))
        return result
python
def probabilities(self, choosers, alternatives): logger.debug( 'start: calculate probabilities in segmented LCM {}'.format( self.name)) choosers, alternatives = self.apply_predict_filters( choosers, alternatives) result = self._group.probabilities(choosers, alternatives) logger.debug( 'finish: calculate probabilities in segmented LCM {}'.format( self.name)) return result
[ "def", "probabilities", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "logger", ".", "debug", "(", "'start: calculate probabilities in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "choosers", ",", "alternatives", "=", "self", ".", "apply_predict_filters", "(", "choosers", ",", "alternatives", ")", "result", "=", "self", ".", "_group", ".", "probabilities", "(", "choosers", ",", "alternatives", ")", "logger", ".", "debug", "(", "'finish: calculate probabilities in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "result" ]
Returns alternative probabilities for each chooser segment as
        a dictionary keyed by segment name.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
            Must have a column matching the .segmentation_col attribute.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing.

        Returns
        -------
        probabilities : dict of pandas.Series
[ "Returns", "alternative", "probabilties", "for", "each", "chooser", "segment", "as", "a", "dictionary", "keyed", "by", "segment", "name", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1547-L1574
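Continuing the same hypothetical tables, a sketch of inspecting the per-segment probabilities:

```python
# Per-segment probability Series, keyed by segment name.
probs = seg.probabilities(households, buildings)
for segment, p in probs.items():
    print(segment, len(p))
```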
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.predict
def predict(self, choosers, alternatives, debug=False): """ Choose from among alternatives for a group of agents after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers. """ logger.debug( 'start: predict models in segmented LCM {}'.format(self.name)) choosers, alternatives = self._filter_choosers_alts( choosers, alternatives) results = self._group.predict(choosers, alternatives, debug=debug) logger.debug( 'finish: predict models in segmented LCM {}'.format(self.name)) return results
python
def predict(self, choosers, alternatives, debug=False): logger.debug( 'start: predict models in segmented LCM {}'.format(self.name)) choosers, alternatives = self._filter_choosers_alts( choosers, alternatives) results = self._group.predict(choosers, alternatives, debug=debug) logger.debug( 'finish: predict models in segmented LCM {}'.format(self.name)) return results
[ "def", "predict", "(", "self", ",", "choosers", ",", "alternatives", ",", "debug", "=", "False", ")", ":", "logger", ".", "debug", "(", "'start: predict models in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "choosers", ",", "alternatives", "=", "self", ".", "_filter_choosers_alts", "(", "choosers", ",", "alternatives", ")", "results", "=", "self", ".", "_group", ".", "predict", "(", "choosers", ",", "alternatives", ",", "debug", "=", "debug", ")", "logger", ".", "debug", "(", "'finish: predict models in segmented LCM {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "results" ]
Choose from among alternatives for a group of agents after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers.
[ "Choose", "from", "among", "alternatives", "for", "a", "group", "of", "agents", "after", "segmenting", "the", "choosers", "table", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1606-L1639
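A prediction sketch under the same assumptions; selecting movers by a null `building_id` is a hypothetical convention, not something this method requires.

```python
# Sketch: simulate choices for unplaced agents only.
movers = households[households.building_id.isnull()]
choices = seg.predict(movers, buildings)  # Series: chooser id -> alt id
households.loc[choices.index, 'building_id'] = choices
```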
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel._process_model_dict
def _process_model_dict(self, d): """ Remove redundant items from a model's configuration dict. Parameters ---------- d : dict Modified in place. Returns ------- dict Modified `d`. """ del d['model_type'] del d['sample_size'] del d['probability_mode'] del d['choice_mode'] del d['choosers_fit_filters'] del d['choosers_predict_filters'] del d['alts_fit_filters'] del d['alts_predict_filters'] del d['interaction_predict_filters'] del d['estimation_sample_size'] del d['prediction_sample_size'] del d['choice_column'] if d['model_expression'] == self.default_model_expr: del d['model_expression'] d["name"] = yamlio.to_scalar_safe(d["name"]) return d
python
def _process_model_dict(self, d): del d['model_type'] del d['sample_size'] del d['probability_mode'] del d['choice_mode'] del d['choosers_fit_filters'] del d['choosers_predict_filters'] del d['alts_fit_filters'] del d['alts_predict_filters'] del d['interaction_predict_filters'] del d['estimation_sample_size'] del d['prediction_sample_size'] del d['choice_column'] if d['model_expression'] == self.default_model_expr: del d['model_expression'] d["name"] = yamlio.to_scalar_safe(d["name"]) return d
[ "def", "_process_model_dict", "(", "self", ",", "d", ")", ":", "del", "d", "[", "'model_type'", "]", "del", "d", "[", "'sample_size'", "]", "del", "d", "[", "'probability_mode'", "]", "del", "d", "[", "'choice_mode'", "]", "del", "d", "[", "'choosers_fit_filters'", "]", "del", "d", "[", "'choosers_predict_filters'", "]", "del", "d", "[", "'alts_fit_filters'", "]", "del", "d", "[", "'alts_predict_filters'", "]", "del", "d", "[", "'interaction_predict_filters'", "]", "del", "d", "[", "'estimation_sample_size'", "]", "del", "d", "[", "'prediction_sample_size'", "]", "del", "d", "[", "'choice_column'", "]", "if", "d", "[", "'model_expression'", "]", "==", "self", ".", "default_model_expr", ":", "del", "d", "[", "'model_expression'", "]", "d", "[", "\"name\"", "]", "=", "yamlio", ".", "to_scalar_safe", "(", "d", "[", "\"name\"", "]", ")", "return", "d" ]
Remove redundant items from a model's configuration dict. Parameters ---------- d : dict Modified in place. Returns ------- dict Modified `d`.
[ "Remove", "redundant", "items", "from", "a", "model", "s", "configuration", "dict", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1641-L1674
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.to_dict
def to_dict(self): """ Returns a dict representation of this instance suitable for conversion to YAML. """ return { 'model_type': 'segmented_discretechoice', 'name': self.name, 'segmentation_col': self.segmentation_col, 'sample_size': self.sample_size, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'default_config': { 'model_expression': self.default_model_expr, }, 'remove_alts': self.remove_alts, 'fitted': self.fitted, 'models': { yamlio.to_scalar_safe(name): self._process_model_dict(m.to_dict()) for name, m in self._group.models.items() } }
python
def to_dict(self): return { 'model_type': 'segmented_discretechoice', 'name': self.name, 'segmentation_col': self.segmentation_col, 'sample_size': self.sample_size, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'default_config': { 'model_expression': self.default_model_expr, }, 'remove_alts': self.remove_alts, 'fitted': self.fitted, 'models': { yamlio.to_scalar_safe(name): self._process_model_dict(m.to_dict()) for name, m in self._group.models.items() } }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'model_type'", ":", "'segmented_discretechoice'", ",", "'name'", ":", "self", ".", "name", ",", "'segmentation_col'", ":", "self", ".", "segmentation_col", ",", "'sample_size'", ":", "self", ".", "sample_size", ",", "'probability_mode'", ":", "self", ".", "probability_mode", ",", "'choice_mode'", ":", "self", ".", "choice_mode", ",", "'choosers_fit_filters'", ":", "self", ".", "choosers_fit_filters", ",", "'choosers_predict_filters'", ":", "self", ".", "choosers_predict_filters", ",", "'alts_fit_filters'", ":", "self", ".", "alts_fit_filters", ",", "'alts_predict_filters'", ":", "self", ".", "alts_predict_filters", ",", "'interaction_predict_filters'", ":", "self", ".", "interaction_predict_filters", ",", "'estimation_sample_size'", ":", "self", ".", "estimation_sample_size", ",", "'prediction_sample_size'", ":", "self", ".", "prediction_sample_size", ",", "'choice_column'", ":", "self", ".", "choice_column", ",", "'default_config'", ":", "{", "'model_expression'", ":", "self", ".", "default_model_expr", ",", "}", ",", "'remove_alts'", ":", "self", ".", "remove_alts", ",", "'fitted'", ":", "self", ".", "fitted", ",", "'models'", ":", "{", "yamlio", ".", "to_scalar_safe", "(", "name", ")", ":", "self", ".", "_process_model_dict", "(", "m", ".", "to_dict", "(", ")", ")", "for", "name", ",", "m", "in", "self", ".", "_group", ".", "models", ".", "items", "(", ")", "}", "}" ]
Returns a dict representation of this instance suitable for conversion to YAML.
[ "Returns", "a", "dict", "representation", "of", "this", "instance", "suitable", "for", "conversion", "to", "YAML", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1676-L1707
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.to_yaml
def to_yaml(self, str_or_buffer=None): """ Save a model respresentation to YAML. Parameters ---------- str_or_buffer : str or file like, optional By default a YAML string is returned. If a string is given here the YAML will be written to that file. If an object with a ``.write`` method is given the YAML will be written to that object. Returns ------- j : str YAML is string if `str_or_buffer` is not given. """ logger.debug('serializing segmented LCM {} to YAML'.format(self.name)) return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
python
def to_yaml(self, str_or_buffer=None): logger.debug('serializing segmented LCM {} to YAML'.format(self.name)) return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
[ "def", "to_yaml", "(", "self", ",", "str_or_buffer", "=", "None", ")", ":", "logger", ".", "debug", "(", "'serializing segmented LCM {} to YAML'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "yamlio", ".", "convert_to_yaml", "(", "self", ".", "to_dict", "(", ")", ",", "str_or_buffer", ")" ]
Save a model representation to YAML.

        Parameters
        ----------
        str_or_buffer : str or file like, optional
            By default a YAML string is returned. If a string is
            given here the YAML will be written to that file.
            If an object with a ``.write`` method is given the
            YAML will be written to that object.

        Returns
        -------
        j : str
            YAML as a string if `str_or_buffer` is not given.
[ "Save", "a", "model", "respresentation", "to", "YAML", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1709-L1728
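A round-trip sketch continuing the earlier example; the file name is hypothetical, and the reloaded model is assumed to carry the fitted coefficients when saved after estimation.

```python
# Persist the fitted model and restore it later.
seg.to_yaml('hlcm.yaml')
seg2 = SegmentedMNLDiscreteChoiceModel.from_yaml(str_or_buffer='hlcm.yaml')
```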
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.columns_used
def columns_used(self): """ Columns from any table used in the model. May come from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used(), util.columns_in_formula(self.default_model_expr), [self.segmentation_col])))
python
def columns_used(self): return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used(), util.columns_in_formula(self.default_model_expr), [self.segmentation_col])))
[ "def", "columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "self", ".", "choosers_columns_used", "(", ")", ",", "self", ".", "alts_columns_used", "(", ")", ",", "self", ".", "interaction_columns_used", "(", ")", ",", "util", ".", "columns_in_formula", "(", "self", ".", "default_model_expr", ")", ",", "[", "self", ".", "segmentation_col", "]", ")", ")", ")" ]
Columns from any table used in the model. May come from either the choosers or alternatives tables.
[ "Columns", "from", "any", "table", "used", "in", "the", "model", ".", "May", "come", "from", "either", "the", "choosers", "or", "alternatives", "tables", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1757-L1768
UDST/urbansim
urbansim/models/dcm.py
SegmentedMNLDiscreteChoiceModel.fit_from_cfg
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname,
                     outcfgname=None):
        """
        Parameters
        ----------
        choosers : DataFrame
            A dataframe of rows of agents that have made choices.
        chosen_fname : string
            A string indicating the column in the choosers dataframe which
            gives which alternative the choosers have chosen.
        alternatives : DataFrame
            A dataframe of alternatives. It should include the current choices
            from the choosers dataframe as well as some other alternatives
            from which to sample.  Values in choosers[chosen_fname] should
            index into the alternatives dataframe.
        cfgname : string
            The name of the yaml config file from which to read the discrete
            choice model.
        outcfgname : string, optional (default cfgname)
            The name of the output yaml config file into which estimation
            results are written.

        Returns
        -------
        lcm : SegmentedMNLDiscreteChoiceModel
            The model that was fit.
        """
        logger.debug('start: fit from configuration {}'.format(cfgname))
        lcm = cls.from_yaml(str_or_buffer=cfgname)
        lcm.fit(choosers, alternatives, choosers[chosen_fname])
        for k, v in lcm._group.models.items():
            print("LCM RESULTS FOR SEGMENT %s\n" % str(k))
            v.report_fit()
        outcfgname = outcfgname or cfgname
        lcm.to_yaml(str_or_buffer=outcfgname)
        logger.debug('finish: fit into configuration {}'.format(outcfgname))
        return lcm
python
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None): logger.debug('start: fit from configuration {}'.format(cfgname)) lcm = cls.from_yaml(str_or_buffer=cfgname) lcm.fit(choosers, alternatives, choosers[chosen_fname]) for k, v in lcm._group.models.items(): print("LCM RESULTS FOR SEGMENT %s\n" % str(k)) v.report_fit() outcfgname = outcfgname or cfgname lcm.to_yaml(str_or_buffer=outcfgname) logger.debug('finish: fit into configuration {}'.format(outcfgname)) return lcm
[ "def", "fit_from_cfg", "(", "cls", ",", "choosers", ",", "chosen_fname", ",", "alternatives", ",", "cfgname", ",", "outcfgname", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: fit from configuration {}'", ".", "format", "(", "cfgname", ")", ")", "lcm", "=", "cls", ".", "from_yaml", "(", "str_or_buffer", "=", "cfgname", ")", "lcm", ".", "fit", "(", "choosers", ",", "alternatives", ",", "choosers", "[", "chosen_fname", "]", ")", "for", "k", ",", "v", "in", "lcm", ".", "_group", ".", "models", ".", "items", "(", ")", ":", "print", "(", "\"LCM RESULTS FOR SEGMENT %s\\n\"", "%", "str", "(", "k", ")", ")", "v", ".", "report_fit", "(", ")", "outcfgname", "=", "outcfgname", "or", "cfgname", "lcm", ".", "to_yaml", "(", "str_or_buffer", "=", "outcfgname", ")", "logger", ".", "debug", "(", "'finish: fit into configuration {}'", ".", "format", "(", "outcfgname", ")", ")", "return", "lcm" ]
Parameters
        ----------
        choosers : DataFrame
            A dataframe of rows of agents that have made choices.
        chosen_fname : string
            A string indicating the column in the choosers dataframe which
            gives which alternative the choosers have chosen.
        alternatives : DataFrame
            A dataframe of alternatives. It should include the current choices
            from the choosers dataframe as well as some other alternatives
            from which to sample.  Values in choosers[chosen_fname] should
            index into the alternatives dataframe.
        cfgname : string
            The name of the yaml config file from which to read the discrete
            choice model.
        outcfgname : string, optional (default cfgname)
            The name of the output yaml config file into which estimation
            results are written.

        Returns
        -------
        lcm : SegmentedMNLDiscreteChoiceModel
            The model that was fit.
[ "Parameters", "----------", "choosers", ":", "DataFrame", "A", "dataframe", "of", "rows", "of", "agents", "that", "have", "made", "choices", ".", "chosen_fname", ":", "string", "A", "string", "indicating", "the", "column", "in", "the", "choosers", "dataframe", "which", "gives", "which", "alternative", "the", "choosers", "have", "chosen", ".", "alternatives", ":", "DataFrame", "A", "dataframe", "of", "alternatives", ".", "It", "should", "include", "the", "current", "choices", "from", "the", "choosers", "dataframe", "as", "well", "as", "some", "other", "alternatives", "from", "which", "to", "sample", ".", "Values", "in", "choosers", "[", "chosen_fname", "]", "should", "index", "into", "the", "alternatives", "dataframe", ".", "cfgname", ":", "string", "The", "name", "of", "the", "yaml", "config", "file", "from", "which", "to", "read", "the", "discrete", "choice", "model", ".", "outcfgname", ":", "string", "optional", "(", "default", "cfgname", ")", "The", "name", "of", "the", "output", "yaml", "config", "file", "where", "estimation", "results", "are", "written", "into", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1771-L1804
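A one-call estimation sketch driven entirely by a YAML config; both file names are hypothetical.

```python
# Reads the config, fits per segment, reports, and writes the estimated
# coefficients back out to 'hlcm_fitted.yaml'.
lcm = SegmentedMNLDiscreteChoiceModel.fit_from_cfg(
    households, 'building_id', buildings,
    cfgname='hlcm.yaml', outcfgname='hlcm_fitted.yaml')
```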
UDST/urbansim
scripts/cache_to_hdf5.py
cache_to_df
def cache_to_df(dir_path): """ Convert a directory of binary array data files to a Pandas DataFrame. Parameters ---------- dir_path : str """ table = {} for attrib in glob.glob(os.path.join(dir_path, '*')): attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib)) if attrib_ext == '.lf8': attrib_data = np.fromfile(attrib, np.float64) table[attrib_name] = attrib_data elif attrib_ext == '.lf4': attrib_data = np.fromfile(attrib, np.float32) table[attrib_name] = attrib_data elif attrib_ext == '.li2': attrib_data = np.fromfile(attrib, np.int16) table[attrib_name] = attrib_data elif attrib_ext == '.li4': attrib_data = np.fromfile(attrib, np.int32) table[attrib_name] = attrib_data elif attrib_ext == '.li8': attrib_data = np.fromfile(attrib, np.int64) table[attrib_name] = attrib_data elif attrib_ext == '.ib1': attrib_data = np.fromfile(attrib, np.bool_) table[attrib_name] = attrib_data elif attrib_ext.startswith('.iS'): length_string = int(attrib_ext[3:]) attrib_data = np.fromfile(attrib, ('a' + str(length_string))) table[attrib_name] = attrib_data else: print('Array {} is not a recognized data type'.format(attrib)) df = pd.DataFrame(table) return df
python
def cache_to_df(dir_path): table = {} for attrib in glob.glob(os.path.join(dir_path, '*')): attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib)) if attrib_ext == '.lf8': attrib_data = np.fromfile(attrib, np.float64) table[attrib_name] = attrib_data elif attrib_ext == '.lf4': attrib_data = np.fromfile(attrib, np.float32) table[attrib_name] = attrib_data elif attrib_ext == '.li2': attrib_data = np.fromfile(attrib, np.int16) table[attrib_name] = attrib_data elif attrib_ext == '.li4': attrib_data = np.fromfile(attrib, np.int32) table[attrib_name] = attrib_data elif attrib_ext == '.li8': attrib_data = np.fromfile(attrib, np.int64) table[attrib_name] = attrib_data elif attrib_ext == '.ib1': attrib_data = np.fromfile(attrib, np.bool_) table[attrib_name] = attrib_data elif attrib_ext.startswith('.iS'): length_string = int(attrib_ext[3:]) attrib_data = np.fromfile(attrib, ('a' + str(length_string))) table[attrib_name] = attrib_data else: print('Array {} is not a recognized data type'.format(attrib)) df = pd.DataFrame(table) return df
[ "def", "cache_to_df", "(", "dir_path", ")", ":", "table", "=", "{", "}", "for", "attrib", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "dir_path", ",", "'*'", ")", ")", ":", "attrib_name", ",", "attrib_ext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "attrib", ")", ")", "if", "attrib_ext", "==", "'.lf8'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "float64", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.lf4'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "float32", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.li2'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "int16", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.li4'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "int32", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.li8'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "int64", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", "==", "'.ib1'", ":", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "np", ".", "bool_", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "elif", "attrib_ext", ".", "startswith", "(", "'.iS'", ")", ":", "length_string", "=", "int", "(", "attrib_ext", "[", "3", ":", "]", ")", "attrib_data", "=", "np", ".", "fromfile", "(", "attrib", ",", "(", "'a'", "+", "str", "(", "length_string", ")", ")", ")", "table", "[", "attrib_name", "]", "=", "attrib_data", "else", ":", "print", "(", "'Array {} is not a recognized data type'", ".", "format", "(", "attrib", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "table", ")", "return", "df" ]
Convert a directory of binary array data files to a Pandas DataFrame. Parameters ---------- dir_path : str
[ "Convert", "a", "directory", "of", "binary", "array", "data", "files", "to", "a", "Pandas", "DataFrame", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L14-L60
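The dtype-per-extension naming convention is the whole trick here; a small self-contained round trip (file names hypothetical) shows the format `cache_to_df` expects:

```python
# Each column is a flat binary file; the extension encodes its dtype
# ('.li4' -> int32, '.lf8' -> float64, '.ib1' -> bool, '.iS<n>' -> bytes).
import os
import tempfile

import numpy as np

d = tempfile.mkdtemp()
np.arange(3, dtype=np.int32).tofile(os.path.join(d, 'zone_id.li4'))
np.array([1.5, 2.5, 3.5], dtype=np.float64).tofile(os.path.join(d, 'acres.lf8'))
# cache_to_df(d) would now return a 3-row DataFrame with columns
# 'zone_id' (int32) and 'acres' (float64).
```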
UDST/urbansim
scripts/cache_to_hdf5.py
convert_dirs
def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):
    """
    Convert a nested set of cache directories to tables in an HDF5 file.

    """
    print('Converting directories in {}'.format(base_dir))

    dirs = glob.glob(os.path.join(base_dir, '*'))
    dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}
    if not dirs:
        raise RuntimeError('No directories found matching known data.')

    store = pd.HDFStore(
        hdf_name, mode='w', complevel=complevel, complib=complib)

    for dirpath in dirs:
        dirname = os.path.basename(dirpath)
        print(dirname)
        df = cache_to_df(dirpath)

        if dirname == 'travel_data':
            keys = ['from_zone_id', 'to_zone_id']
        elif dirname == 'annual_employment_control_totals':
            keys = ['sector_id', 'year', 'home_based_status']
        elif dirname == 'annual_job_relocation_rates':
            keys = ['sector_id']
        elif dirname == 'annual_household_control_totals':
            keys = ['year']
        elif dirname == 'annual_household_relocation_rates':
            keys = ['age_of_head_max', 'age_of_head_min',
                    'income_min', 'income_max']
        elif dirname == 'building_sqft_per_job':
            keys = ['zone_id', 'building_type_id']
        elif dirname == 'counties':
            keys = ['county_id']
        elif dirname == 'development_event_history':
            keys = ['building_id']
        elif dirname == 'target_vacancies':
            keys = ['building_type_id', 'year']
        else:
            keys = [dirname[:-1] + '_id']

        if dirname != 'annual_household_relocation_rates':
            df = df.set_index(keys)

        for colname in df.columns:
            if df[colname].dtype == np.float64:
                df[colname] = df[colname].astype(np.float32)
            elif df[colname].dtype == np.int64:
                df[colname] = df[colname].astype(np.int32)
            else:
                df[colname] = df[colname]

        df.info()
        print(os.linesep)
        store.put(dirname, df)

    store.close()
python
def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):
    print('Converting directories in {}'.format(base_dir))

    dirs = glob.glob(os.path.join(base_dir, '*'))
    dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}
    if not dirs:
        raise RuntimeError('No directories found matching known data.')

    store = pd.HDFStore(
        hdf_name, mode='w', complevel=complevel, complib=complib)

    for dirpath in dirs:
        dirname = os.path.basename(dirpath)
        print(dirname)
        df = cache_to_df(dirpath)

        if dirname == 'travel_data':
            keys = ['from_zone_id', 'to_zone_id']
        elif dirname == 'annual_employment_control_totals':
            keys = ['sector_id', 'year', 'home_based_status']
        elif dirname == 'annual_job_relocation_rates':
            keys = ['sector_id']
        elif dirname == 'annual_household_control_totals':
            keys = ['year']
        elif dirname == 'annual_household_relocation_rates':
            keys = ['age_of_head_max', 'age_of_head_min',
                    'income_min', 'income_max']
        elif dirname == 'building_sqft_per_job':
            keys = ['zone_id', 'building_type_id']
        elif dirname == 'counties':
            keys = ['county_id']
        elif dirname == 'development_event_history':
            keys = ['building_id']
        elif dirname == 'target_vacancies':
            keys = ['building_type_id', 'year']
        else:
            keys = [dirname[:-1] + '_id']

        if dirname != 'annual_household_relocation_rates':
            df = df.set_index(keys)

        for colname in df.columns:
            if df[colname].dtype == np.float64:
                df[colname] = df[colname].astype(np.float32)
            elif df[colname].dtype == np.int64:
                df[colname] = df[colname].astype(np.int32)
            else:
                df[colname] = df[colname]

        df.info()
        print(os.linesep)
        store.put(dirname, df)

    store.close()
[ "def", "convert_dirs", "(", "base_dir", ",", "hdf_name", ",", "complib", "=", "None", ",", "complevel", "=", "0", ")", ":", "print", "(", "'Converting directories in {}'", ".", "format", "(", "base_dir", ")", ")", "dirs", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'*'", ")", ")", "dirs", "=", "{", "d", "for", "d", "in", "dirs", "if", "os", ".", "path", ".", "basename", "(", "d", ")", "in", "DIRECTORIES", "}", "if", "not", "dirs", ":", "raise", "RuntimeError", "(", "'No direcotries found matching known data.'", ")", "store", "=", "pd", ".", "HDFStore", "(", "hdf_name", ",", "mode", "=", "'w'", ",", "complevel", "=", "complevel", ",", "complib", "=", "complib", ")", "for", "dirpath", "in", "dirs", ":", "dirname", "=", "os", ".", "path", ".", "basename", "(", "dirpath", ")", "print", "(", "dirname", ")", "df", "=", "cache_to_df", "(", "dirpath", ")", "if", "dirname", "==", "'travel_data'", ":", "keys", "=", "[", "'from_zone_id'", ",", "'to_zone_id'", "]", "elif", "dirname", "==", "'annual_employment_control_totals'", ":", "keys", "=", "[", "'sector_id'", ",", "'year'", ",", "'home_based_status'", "]", "elif", "dirname", "==", "'annual_job_relocation_rates'", ":", "keys", "=", "[", "'sector_id'", "]", "elif", "dirname", "==", "'annual_household_control_totals'", ":", "keys", "=", "[", "'year'", "]", "elif", "dirname", "==", "'annual_household_relocation_rates'", ":", "keys", "=", "[", "'age_of_head_max'", ",", "'age_of_head_min'", ",", "'income_min'", ",", "'income_max'", "]", "elif", "dirname", "==", "'building_sqft_per_job'", ":", "keys", "=", "[", "'zone_id'", ",", "'building_type_id'", "]", "elif", "dirname", "==", "'counties'", ":", "keys", "=", "[", "'county_id'", "]", "elif", "dirname", "==", "'development_event_history'", ":", "keys", "=", "[", "'building_id'", "]", "elif", "dirname", "==", "'target_vacancies'", ":", "keys", "=", "[", "'building_type_id'", ",", "'year'", "]", "else", ":", "keys", "=", "[", "dirname", "[", ":", "-", "1", "]", "+", "'_id'", "]", "if", "dirname", "!=", "'annual_household_relocation_rates'", ":", "df", "=", "df", ".", "set_index", "(", "keys", ")", "for", "colname", "in", "df", ".", "columns", ":", "if", "df", "[", "colname", "]", ".", "dtype", "==", "np", ".", "float64", ":", "df", "[", "colname", "]", "=", "df", "[", "colname", "]", ".", "astype", "(", "np", ".", "float32", ")", "elif", "df", "[", "colname", "]", ".", "dtype", "==", "np", ".", "int64", ":", "df", "[", "colname", "]", "=", "df", "[", "colname", "]", ".", "astype", "(", "np", ".", "int32", ")", "else", ":", "df", "[", "colname", "]", "=", "df", "[", "colname", "]", "df", ".", "info", "(", ")", "print", "(", "os", ".", "linesep", ")", "store", ".", "put", "(", "dirname", ",", "df", ")", "store", ".", "close", "(", ")" ]
Convert a nested set of cache directories to tables in an HDF5 file.
[ "Convert", "nested", "set", "of", "directories", "to" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L72-L130
UDST/urbansim
urbansim/utils/misc.py
get_run_number
def get_run_number(): """ Get a run number for this execution of the model system, for identifying the output hdf5 files). Returns ------- The integer number for this run of the model system. """ try: f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'r') num = int(f.read()) f.close() except Exception: num = 1 f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'w') f.write(str(num + 1)) f.close() return num
python
def get_run_number(): try: f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'r') num = int(f.read()) f.close() except Exception: num = 1 f = open(os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM'), 'w') f.write(str(num + 1)) f.close() return num
[ "def", "get_run_number", "(", ")", ":", "try", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'DATA_HOME'", ",", "\".\"", ")", ",", "'RUNNUM'", ")", ",", "'r'", ")", "num", "=", "int", "(", "f", ".", "read", "(", ")", ")", "f", ".", "close", "(", ")", "except", "Exception", ":", "num", "=", "1", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'DATA_HOME'", ",", "\".\"", ")", ",", "'RUNNUM'", ")", ",", "'w'", ")", "f", ".", "write", "(", "str", "(", "num", "+", "1", ")", ")", "f", ".", "close", "(", ")", "return", "num" ]
Get a run number for this execution of the model system (for
    identifying the output hdf5 files).

    Returns
    -------
    The integer number for this run of the model system.
[ "Get", "a", "run", "number", "for", "this", "execution", "of", "the", "model", "system", "for", "identifying", "the", "output", "hdf5", "files", ")", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L97-L115
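The counter lives in a plain-text `RUNNUM` file under `$DATA_HOME` (falling back to the current directory); a sandboxed sketch of the behavior, with the temp directory standing in for a real `DATA_HOME`:

```python
# Sketch: the first call creates RUNNUM and returns 1; each later call
# returns the stored value and increments the file.
import os
import tempfile

from urbansim.utils.misc import get_run_number

os.environ['DATA_HOME'] = tempfile.mkdtemp()  # hypothetical sandbox
assert get_run_number() == 1
assert get_run_number() == 2
```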
UDST/urbansim
urbansim/utils/misc.py
compute_range
def compute_range(travel_data, attr, travel_time_attr, dist, agg=np.sum): """ Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation """ travel_data = travel_data.reset_index(level=1) travel_data = travel_data[travel_data[travel_time_attr] < dist] travel_data["attr"] = attr[travel_data.to_zone_id].values return travel_data.groupby(level=0).attr.apply(agg)
python
def compute_range(travel_data, attr, travel_time_attr, dist, agg=np.sum): travel_data = travel_data.reset_index(level=1) travel_data = travel_data[travel_data[travel_time_attr] < dist] travel_data["attr"] = attr[travel_data.to_zone_id].values return travel_data.groupby(level=0).attr.apply(agg)
[ "def", "compute_range", "(", "travel_data", ",", "attr", ",", "travel_time_attr", ",", "dist", ",", "agg", "=", "np", ".", "sum", ")", ":", "travel_data", "=", "travel_data", ".", "reset_index", "(", "level", "=", "1", ")", "travel_data", "=", "travel_data", "[", "travel_data", "[", "travel_time_attr", "]", "<", "dist", "]", "travel_data", "[", "\"attr\"", "]", "=", "attr", "[", "travel_data", ".", "to_zone_id", "]", ".", "values", "return", "travel_data", ".", "groupby", "(", "level", "=", "0", ")", ".", "attr", ".", "apply", "(", "agg", ")" ]
Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation
[ "Compute", "a", "zone", "-", "based", "accessibility", "query", "using", "the", "urbansim", "format", "travel", "data", "dataframe", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L118-L142
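A worked toy example makes the aggregation concrete: jobs reachable within 20 minutes of each zone, using a two-zone travel table (all values hypothetical).

```python
import numpy as np
import pandas as pd

from urbansim.utils.misc import compute_range

travel_data = pd.DataFrame(
    {'am_peak_travel_time': [5., 15., 25., 5.]},
    index=pd.MultiIndex.from_tuples(
        [(1, 1), (1, 2), (2, 1), (2, 2)],
        names=['from_zone_id', 'to_zone_id']))
jobs = pd.Series([100, 50], index=[1, 2])

# zone 1 reaches zones 1 and 2 (100 + 50 = 150);
# zone 2 only reaches itself within 20 minutes (50)
print(compute_range(travel_data, jobs, 'am_peak_travel_time', 20))
```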
UDST/urbansim
urbansim/utils/misc.py
reindex
def reindex(series1, series2): """ This reindexes the first series by the second series. This is an extremely common operation that does not appear to be in Pandas at this time. If anyone knows of an easier way to do this in Pandas, please inform the UrbanSim developers. The canonical example would be a parcel series which has an index which is parcel_ids and a value which you want to fetch, let's say it's land_area. Another dataset, let's say of buildings has a series which indicate the parcel_ids that the buildings are located on, but which does not have land_area. If you pass parcels.land_area as the first series and buildings.parcel_id as the second series, this function returns a series which is indexed by buildings and has land_area as values and can be added to the buildings dataset. In short, this is a join on to a different table using a foreign key stored in the current table, but with only one attribute rather than for a full dataset. This is very similar to the pandas "loc" function or "reindex" function, but neither of those functions return the series indexed on the current table. In both of those cases, the series would be indexed on the foreign table and would require a second step to change the index. """ # turns out the merge is much faster than the .loc below df = pd.merge(pd.DataFrame({"left": series2}), pd.DataFrame({"right": series1}), left_on="left", right_index=True, how="left") return df.right
python
def reindex(series1, series2): df = pd.merge(pd.DataFrame({"left": series2}), pd.DataFrame({"right": series1}), left_on="left", right_index=True, how="left") return df.right
[ "def", "reindex", "(", "series1", ",", "series2", ")", ":", "# turns out the merge is much faster than the .loc below", "df", "=", "pd", ".", "merge", "(", "pd", ".", "DataFrame", "(", "{", "\"left\"", ":", "series2", "}", ")", ",", "pd", ".", "DataFrame", "(", "{", "\"right\"", ":", "series1", "}", ")", ",", "left_on", "=", "\"left\"", ",", "right_index", "=", "True", ",", "how", "=", "\"left\"", ")", "return", "df", ".", "right" ]
This reindexes the first series by the second series.  This is an
    extremely common operation that does not appear to be in Pandas at this
    time.  If anyone knows of an easier way to do this in Pandas, please
    inform the UrbanSim developers.

    The canonical example would be a parcel series which has an index which
    is parcel_ids and a value which you want to fetch, let's say it's
    land_area.  Another dataset, let's say of buildings, has a series which
    indicates the parcel_ids that the buildings are located on, but which
    does not have land_area.  If you pass parcels.land_area as the first
    series and buildings.parcel_id as the second series, this function
    returns a series which is indexed by buildings and has land_area as
    values and can be added to the buildings dataset.

    In short, this is a join onto a different table using a foreign key
    stored in the current table, but with only one attribute rather than
    for a full dataset.

    This is very similar to the pandas "loc" function or "reindex" function,
    but neither of those functions return the series indexed on the current
    table.  In both of those cases, the series would be indexed on the
    foreign table and would require a second step to change the index.
[ "This", "reindexes", "the", "first", "series", "by", "the", "second", "series", ".", "This", "is", "an", "extremely", "common", "operation", "that", "does", "not", "appear", "to", "be", "in", "Pandas", "at", "this", "time", ".", "If", "anyone", "knows", "of", "an", "easier", "way", "to", "do", "this", "in", "Pandas", "please", "inform", "the", "UrbanSim", "developers", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L145-L177
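The canonical example from the docstring, as runnable toy code (all values hypothetical):

```python
# Broadcast parcel land_area onto buildings via the parcel_id foreign key.
import pandas as pd

from urbansim.utils.misc import reindex

parcels = pd.Series([1000., 2000.], index=[10, 11], name='land_area')
buildings = pd.DataFrame({'parcel_id': [11, 10, 11]}, index=[1, 2, 3])

buildings['land_area'] = reindex(parcels, buildings.parcel_id)
# building 1 -> 2000.0, building 2 -> 1000.0, building 3 -> 2000.0
```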
UDST/urbansim
urbansim/utils/misc.py
fidx
def fidx(right, left, left_fk=None): """ Re-indexes a series or data frame (right) to align with another (left) series or data frame via foreign key relationship. The index of the right must be unique. This is similar to misc.reindex, but allows for data frame re-indexes and supports re-indexing data frames or series with a multi-index. Parameters: ----------- right: pandas.DataFrame or pandas.Series Series or data frame to re-index from. left: pandas.Series or pandas.DataFrame Series or data frame to re-index to. If a series is provided, its values serve as the foreign keys. If a data frame is provided, one or more columns may be used as foreign keys, must specify the ``left_fk`` argument to specify which column(s) will serve as keys. left_fk: optional, str or list of str Used when the left is a data frame, specifies the column(s) in the left to serve as foreign keys. The specified columns' ordering must match the order of the multi-index in the right. Returns: -------- pandas.Series or pandas.DataFrame with column(s) from right aligned with the left. """ # ensure that we can align correctly if not right.index.is_unique: raise ValueError("The right's index must be unique!") # simpler case: # if the left (target) is a single series then just re-index to it if isinstance(left_fk, str): left = left[left_fk] if isinstance(left, pd.Series): a = right.reindex(left) a.index = left.index return a # when reindexing using multiple columns (composite foreign key) # i.e. the right has a multindex # if a series for the right provided, convert to a data frame if isinstance(right, pd.Series): right = right.to_frame('right') right_cols = 'right' else: right_cols = right.columns # do the merge return pd.merge( left=left, right=right, left_on=left_fk, right_index=True, how='left' )[right_cols]
python
def fidx(right, left, left_fk=None): if not right.index.is_unique: raise ValueError("The right's index must be unique!") if isinstance(left_fk, str): left = left[left_fk] if isinstance(left, pd.Series): a = right.reindex(left) a.index = left.index return a if isinstance(right, pd.Series): right = right.to_frame('right') right_cols = 'right' else: right_cols = right.columns return pd.merge( left=left, right=right, left_on=left_fk, right_index=True, how='left' )[right_cols]
[ "def", "fidx", "(", "right", ",", "left", ",", "left_fk", "=", "None", ")", ":", "# ensure that we can align correctly", "if", "not", "right", ".", "index", ".", "is_unique", ":", "raise", "ValueError", "(", "\"The right's index must be unique!\"", ")", "# simpler case:", "# if the left (target) is a single series then just re-index to it", "if", "isinstance", "(", "left_fk", ",", "str", ")", ":", "left", "=", "left", "[", "left_fk", "]", "if", "isinstance", "(", "left", ",", "pd", ".", "Series", ")", ":", "a", "=", "right", ".", "reindex", "(", "left", ")", "a", ".", "index", "=", "left", ".", "index", "return", "a", "# when reindexing using multiple columns (composite foreign key)", "# i.e. the right has a multindex", "# if a series for the right provided, convert to a data frame", "if", "isinstance", "(", "right", ",", "pd", ".", "Series", ")", ":", "right", "=", "right", ".", "to_frame", "(", "'right'", ")", "right_cols", "=", "'right'", "else", ":", "right_cols", "=", "right", ".", "columns", "# do the merge", "return", "pd", ".", "merge", "(", "left", "=", "left", ",", "right", "=", "right", ",", "left_on", "=", "left_fk", ",", "right_index", "=", "True", ",", "how", "=", "'left'", ")", "[", "right_cols", "]" ]
Re-indexes a series or data frame (right) to align with
    another (left) series or data frame via a foreign key relationship.
    The index of the right must be unique.

    This is similar to misc.reindex, but allows for data frame
    re-indexes and supports re-indexing data frames or series
    with a multi-index.

    Parameters:
    -----------
    right: pandas.DataFrame or pandas.Series
        Series or data frame to re-index from.
    left: pandas.Series or pandas.DataFrame
        Series or data frame to re-index to.
        If a series is provided, its values serve as the foreign keys.
        If a data frame is provided, one or more columns may be used
        as foreign keys; in that case the ``left_fk`` argument must
        indicate which column(s) will serve as keys.
    left_fk: optional, str or list of str
        Used when the left is a data frame, specifies the column(s) in
        the left to serve as foreign keys. The specified columns' ordering
        must match the order of the multi-index in the right.

    Returns:
    --------
    pandas.Series or pandas.DataFrame with column(s) from
    right aligned with the left.
[ "Re", "-", "indexes", "a", "series", "or", "data", "frame", "(", "right", ")", "to", "align", "with", "another", "(", "left", ")", "series", "or", "data", "frame", "via", "foreign", "key", "relationship", ".", "The", "index", "of", "the", "right", "must", "be", "unique", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L182-L244
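A toy example of the composite-key path (all values hypothetical): the right is indexed by (zone_id, building_type_id) and the left carries both columns.

```python
import pandas as pd

from urbansim.utils.misc import fidx

sqft_per_job = pd.Series(
    [250., 400.],
    index=pd.MultiIndex.from_tuples(
        [(1, 'office'), (1, 'retail')],
        names=['zone_id', 'building_type_id']))
buildings = pd.DataFrame({'zone_id': [1, 1],
                          'building_type_id': ['retail', 'office']})

buildings['sqft_per_job'] = fidx(
    sqft_per_job, buildings, left_fk=['zone_id', 'building_type_id'])
# -> 400.0 for the retail row, 250.0 for the office row
```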
UDST/urbansim
urbansim/utils/misc.py
numpymat2df
def numpymat2df(mat): """ Sometimes (though not very often) it is useful to convert a numpy matrix which has no column names to a Pandas dataframe for use of the Pandas functions. This method converts a 2D numpy matrix to Pandas dataframe with default column headers. Parameters ---------- mat : The numpy matrix Returns ------- A pandas dataframe with the same data as the input matrix but with columns named x0, x1, ... x[n-1] for the number of columns. """ return pd.DataFrame( dict(('x%d' % i, mat[:, i]) for i in range(mat.shape[1])))
python
def numpymat2df(mat): return pd.DataFrame( dict(('x%d' % i, mat[:, i]) for i in range(mat.shape[1])))
[ "def", "numpymat2df", "(", "mat", ")", ":", "return", "pd", ".", "DataFrame", "(", "dict", "(", "(", "'x%d'", "%", "i", ",", "mat", "[", ":", ",", "i", "]", ")", "for", "i", "in", "range", "(", "mat", ".", "shape", "[", "1", "]", ")", ")", ")" ]
Sometimes (though not very often) it is useful to convert a numpy matrix which has no column names to a Pandas dataframe for use of the Pandas functions. This method converts a 2D numpy matrix to Pandas dataframe with default column headers. Parameters ---------- mat : The numpy matrix Returns ------- A pandas dataframe with the same data as the input matrix but with columns named x0, x1, ... x[n-1] for the number of columns.
[ "Sometimes", "(", "though", "not", "very", "often", ")", "it", "is", "useful", "to", "convert", "a", "numpy", "matrix", "which", "has", "no", "column", "names", "to", "a", "Pandas", "dataframe", "for", "use", "of", "the", "Pandas", "functions", ".", "This", "method", "converts", "a", "2D", "numpy", "matrix", "to", "Pandas", "dataframe", "with", "default", "column", "headers", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L300-L317
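A quick demo of the default column naming:

```python
# A 2x3 matrix becomes a frame with columns x0, x1, x2.
import numpy as np

from urbansim.utils.misc import numpymat2df

print(numpymat2df(np.arange(6).reshape(2, 3)))
```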
UDST/urbansim
urbansim/utils/misc.py
df64bitto32bit
def df64bitto32bit(tbl): """ Convert a Pandas dataframe from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- tbl : The dataframe to convert Returns ------- The converted dataframe """ newtbl = pd.DataFrame(index=tbl.index) for colname in tbl.columns: newtbl[colname] = series64bitto32bit(tbl[colname]) return newtbl
python
def df64bitto32bit(tbl): newtbl = pd.DataFrame(index=tbl.index) for colname in tbl.columns: newtbl[colname] = series64bitto32bit(tbl[colname]) return newtbl
[ "def", "df64bitto32bit", "(", "tbl", ")", ":", "newtbl", "=", "pd", ".", "DataFrame", "(", "index", "=", "tbl", ".", "index", ")", "for", "colname", "in", "tbl", ".", "columns", ":", "newtbl", "[", "colname", "]", "=", "series64bitto32bit", "(", "tbl", "[", "colname", "]", ")", "return", "newtbl" ]
Convert a Pandas dataframe from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- tbl : The dataframe to convert Returns ------- The converted dataframe
[ "Convert", "a", "Pandas", "dataframe", "from", "64", "bit", "types", "to", "32", "bit", "types", "to", "save", "memory", "or", "disk", "space", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L320-L336
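The column-wise downcast that df64bitto32bit performs, condensed into a single pandas apply over a hypothetical frame:

import numpy as np
import pandas as pd

tbl = pd.DataFrame({'a': np.array([1, 2], dtype=np.int64),
                    'b': np.array([1.5, 2.5], dtype=np.float64)})
# mirror series64bitto32bit per column: float64 -> float32, int64 -> int32
small = tbl.apply(lambda s: s.astype('float32') if s.dtype == np.float64
                  else s.astype('int32') if s.dtype == np.int64 else s)
print(small.dtypes.tolist())  # [dtype('int32'), dtype('float32')]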
UDST/urbansim
urbansim/utils/misc.py
series64bitto32bit
def series64bitto32bit(s): """ Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series """ if s.dtype == np.float64: return s.astype('float32') elif s.dtype == np.int64: return s.astype('int32') return s
python
def series64bitto32bit(s): if s.dtype == np.float64: return s.astype('float32') elif s.dtype == np.int64: return s.astype('int32') return s
[ "def", "series64bitto32bit", "(", "s", ")", ":", "if", "s", ".", "dtype", "==", "np", ".", "float64", ":", "return", "s", ".", "astype", "(", "'float32'", ")", "elif", "s", ".", "dtype", "==", "np", ".", "int64", ":", "return", "s", ".", "astype", "(", "'int32'", ")", "return", "s" ]
Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series
[ "Convert", "a", "Pandas", "series", "from", "64", "bit", "types", "to", "32", "bit", "types", "to", "save", "memory", "or", "disk", "space", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L339-L356
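A one-line check of the memory saving the docstring promises (sizes in bytes; the float32 copy needs roughly half the buffer):

import numpy as np
import pandas as pd

s = pd.Series(np.zeros(1000, dtype=np.float64))
print(s.memory_usage(), s.astype('float32').memory_usage())  # ~8128 vs ~4128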
UDST/urbansim
urbansim/utils/misc.py
pandasdfsummarytojson
def pandasdfsummarytojson(df, ndigits=3): """ Convert the result of a Pandas describe operation to a json object. Parameters ---------- df : The result of a Pandas describe operation. ndigits : int, optional - The number of significant digits to round to. Returns ------- A json object which captures the describe. Keys are field names and values are dictionaries with all of the indexes returned by the Pandas describe. """ df = df.transpose() return {k: _pandassummarytojson(v, ndigits) for k, v in df.iterrows()}
python
def pandasdfsummarytojson(df, ndigits=3): df = df.transpose() return {k: _pandassummarytojson(v, ndigits) for k, v in df.iterrows()}
[ "def", "pandasdfsummarytojson", "(", "df", ",", "ndigits", "=", "3", ")", ":", "df", "=", "df", ".", "transpose", "(", ")", "return", "{", "k", ":", "_pandassummarytojson", "(", "v", ",", "ndigits", ")", "for", "k", ",", "v", "in", "df", ".", "iterrows", "(", ")", "}" ]
Convert the result of a Pandas describe operation to a json object. Parameters ---------- df : The result of a Pandas describe operation. ndigits : int, optional - The number of significant digits to round to. Returns ------- A json object which captures the describe. Keys are field names and values are dictionaries with all of the indexes returned by the Pandas describe.
[ "Convert", "the", "result", "of", "a" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L363-L379
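_pandassummarytojson is a private helper not shown in this record; assuming it rounds each describe statistic, an equivalent end-to-end sketch is:

import pandas as pd

df = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [4.0, 5.0, 6.0]})
desc = df.describe().transpose()
summary = {k: {stat: round(val, 3) for stat, val in v.items()}
           for k, v in desc.iterrows()}
print(summary['x']['mean'])  # 2.0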
UDST/urbansim
urbansim/utils/misc.py
column_map
def column_map(tables, columns): """ Take a list of tables and a list of column names and resolve which columns come from which table. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- col_map : dict Maps table names to lists of column names. """ if not columns: return {t.name: None for t in tables} columns = set(columns) colmap = {t.name: list(set(t.columns).intersection(columns)) for t in tables} foundcols = tz.reduce(lambda x, y: x.union(y), (set(v) for v in colmap.values())) if foundcols != columns: raise RuntimeError('Not all required columns were found. ' 'Missing: {}'.format(list(columns - foundcols))) return colmap
python
def column_map(tables, columns): if not columns: return {t.name: None for t in tables} columns = set(columns) colmap = {t.name: list(set(t.columns).intersection(columns)) for t in tables} foundcols = tz.reduce(lambda x, y: x.union(y), (set(v) for v in colmap.values())) if foundcols != columns: raise RuntimeError('Not all required columns were found. ' 'Missing: {}'.format(list(columns - foundcols))) return colmap
[ "def", "column_map", "(", "tables", ",", "columns", ")", ":", "if", "not", "columns", ":", "return", "{", "t", ".", "name", ":", "None", "for", "t", "in", "tables", "}", "columns", "=", "set", "(", "columns", ")", "colmap", "=", "{", "t", ".", "name", ":", "list", "(", "set", "(", "t", ".", "columns", ")", ".", "intersection", "(", "columns", ")", ")", "for", "t", "in", "tables", "}", "foundcols", "=", "tz", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "union", "(", "y", ")", ",", "(", "set", "(", "v", ")", "for", "v", "in", "colmap", ".", "values", "(", ")", ")", ")", "if", "foundcols", "!=", "columns", ":", "raise", "RuntimeError", "(", "'Not all required columns were found. '", "'Missing: {}'", ".", "format", "(", "list", "(", "columns", "-", "foundcols", ")", ")", ")", "return", "colmap" ]
Take a list of tables and a list of column names and resolve which columns come from which table. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- col_map : dict Maps table names to lists of column names.
[ "Take", "a", "list", "of", "tables", "and", "a", "list", "of", "column", "names", "and", "resolve", "which", "columns", "come", "from", "which", "table", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L382-L410
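A usage sketch with stand-in table wrappers; SimpleNamespace supplies the ``.name``/``.columns`` attributes the docstring requires, and the expected results are shown as comments:

from types import SimpleNamespace

t1 = SimpleNamespace(name='households', columns=['income', 'persons'])
t2 = SimpleNamespace(name='buildings', columns=['rent', 'sqft'])
# column_map([t1, t2], ['income', 'rent'])
#   -> {'households': ['income'], 'buildings': ['rent']}
# column_map([t1, t2], ['income', 'missing'])
#   -> RuntimeError listing the missing column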
UDST/urbansim
urbansim/utils/misc.py
column_list
def column_list(tables, columns): """ Take a list of tables and a list of column names and return the columns that are present in the tables. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- cols : list Lists of column names available in the tables. """ columns = set(columns) foundcols = tz.reduce(lambda x, y: x.union(y), (set(t.columns) for t in tables)) return list(columns.intersection(foundcols))
python
def column_list(tables, columns): columns = set(columns) foundcols = tz.reduce(lambda x, y: x.union(y), (set(t.columns) for t in tables)) return list(columns.intersection(foundcols))
[ "def", "column_list", "(", "tables", ",", "columns", ")", ":", "columns", "=", "set", "(", "columns", ")", "foundcols", "=", "tz", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "union", "(", "y", ")", ",", "(", "set", "(", "t", ".", "columns", ")", "for", "t", "in", "tables", ")", ")", "return", "list", "(", "columns", ".", "intersection", "(", "foundcols", ")", ")" ]
Take a list of tables and a list of column names and return the columns that are present in the tables. Parameters ---------- tables : sequence of _DataFrameWrapper or _TableFuncWrapper Could also be sequence of modified pandas.DataFrames, the important thing is that they have ``.name`` and ``.columns`` attributes. columns : sequence of str The column names of interest. Returns ------- cols : list Lists of column names available in the tables.
[ "Take", "a", "list", "of", "tables", "and", "a", "list", "of", "column", "names", "and", "return", "the", "columns", "that", "are", "present", "in", "the", "tables", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L413-L434
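column_list differs from column_map only in that unknown names are dropped rather than raising; its set arithmetic, inlined with the same stand-in columns as above:

wanted = {'income', 'rent', 'missing'}
found = {'income', 'persons'} | {'rent', 'sqft'}  # union over the tables' columns
print(sorted(wanted & found))  # ['income', 'rent'] -- 'missing' silently dropped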
UDST/urbansim
urbansim/utils/sampling.py
get_probs
def get_probs(data, prob_column=None): """ Checks for presence of a probability column and returns the result as a numpy array. If the probabilities are weights (i.e. they don't sum to 1), then they will be normalized. Parameters ---------- data: pandas.DataFrame Table to sample from. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- numpy.array """ if prob_column is None: p = None else: p = data[prob_column].fillna(0).values if p.sum() == 0: p = np.ones(len(p)) if abs(p.sum() - 1.0) > 1e-8: p = p / (1.0 * p.sum()) return p
python
def get_probs(data, prob_column=None): if prob_column is None: p = None else: p = data[prob_column].fillna(0).values if p.sum() == 0: p = np.ones(len(p)) if abs(p.sum() - 1.0) > 1e-8: p = p / (1.0 * p.sum()) return p
[ "def", "get_probs", "(", "data", ",", "prob_column", "=", "None", ")", ":", "if", "prob_column", "is", "None", ":", "p", "=", "None", "else", ":", "p", "=", "data", "[", "prob_column", "]", ".", "fillna", "(", "0", ")", ".", "values", "if", "p", ".", "sum", "(", ")", "==", "0", ":", "p", "=", "np", ".", "ones", "(", "len", "(", "p", ")", ")", "if", "abs", "(", "p", ".", "sum", "(", ")", "-", "1.0", ")", ">", "1e-8", ":", "p", "=", "p", "/", "(", "1.0", "*", "p", ".", "sum", "(", ")", ")", "return", "p" ]
Checks for presence of a probability column and returns the result as a numpy array. If the probabilities are weights (i.e. they don't sum to 1), then they will be normalized. Parameters ---------- data: pandas.DataFrame Table to sample from. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- numpy.array
[ "Checks", "for", "presence", "of", "a", "probability", "column", "and", "returns", "the", "result", "as", "a", "numpy", "array", ".", "If", "the", "probabilities", "are", "weights", "(", "i", ".", "e", ".", "they", "don", "t", "sum", "to", "1", ")", "then", "this", "will", "be", "recalculated", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L6-L32
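The normalization branch isolated, with hypothetical weights that do not sum to 1:

import numpy as np

w = np.array([2.0, 0.0, 6.0])
if abs(w.sum() - 1.0) > 1e-8:
    w = w / (1.0 * w.sum())
print(w)  # [0.25 0.   0.75]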
UDST/urbansim
urbansim/utils/sampling.py
accounting_sample_replace
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50): """ Sample rows with accounting with replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly. """ # check for probabilities p = get_probs(data, prob_column) # determine avg number of accounting items per sample (e.g. persons per household) per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values)) curr_total = 0 remaining = total sample_rows = pd.DataFrame() closest = None closest_remain = total matched = False for i in range(0, max_iterations): # stop if we've hit the control if remaining == 0: matched = True break # if sampling with probabilities, re-calc the # of items per sample # after the initial sample, this way the sample size reflects the probabilities if p is not None and i == 1: per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows)) # update the sample num_samples = int(math.ceil(math.fabs(remaining) / per_sample)) if remaining > 0: # we're short, add to the sample curr_ids = np.random.choice(data.index.values, num_samples, p=p) sample_rows = pd.concat([sample_rows, data.loc[curr_ids]]) else: # we've overshot, remove from existing samples (FIFO) sample_rows = sample_rows.iloc[num_samples:].copy() # update the total and check for the closest result curr_total = sample_rows[accounting_column].sum() remaining = total - curr_total if abs(remaining) < closest_remain: closest_remain = abs(remaining) closest = sample_rows return closest, matched
python
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50): p = get_probs(data, prob_column) per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values)) curr_total = 0 remaining = total sample_rows = pd.DataFrame() closest = None closest_remain = total matched = False for i in range(0, max_iterations): if remaining == 0: matched = True break if p is not None and i == 1: per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows)) num_samples = int(math.ceil(math.fabs(remaining) / per_sample)) if remaining > 0: curr_ids = np.random.choice(data.index.values, num_samples, p=p) sample_rows = pd.concat([sample_rows, data.loc[curr_ids]]) else: sample_rows = sample_rows.iloc[num_samples:].copy() curr_total = sample_rows[accounting_column].sum() remaining = total - curr_total if abs(remaining) < closest_remain: closest_remain = abs(remaining) closest = sample_rows return closest, matched
[ "def", "accounting_sample_replace", "(", "total", ",", "data", ",", "accounting_column", ",", "prob_column", "=", "None", ",", "max_iterations", "=", "50", ")", ":", "# check for probabilities", "p", "=", "get_probs", "(", "data", ",", "prob_column", ")", "# determine avg number of accounting items per sample (e.g. persons per household)", "per_sample", "=", "data", "[", "accounting_column", "]", ".", "sum", "(", ")", "/", "(", "1.0", "*", "len", "(", "data", ".", "index", ".", "values", ")", ")", "curr_total", "=", "0", "remaining", "=", "total", "sample_rows", "=", "pd", ".", "DataFrame", "(", ")", "closest", "=", "None", "closest_remain", "=", "total", "matched", "=", "False", "for", "i", "in", "range", "(", "0", ",", "max_iterations", ")", ":", "# stop if we've hit the control", "if", "remaining", "==", "0", ":", "matched", "=", "True", "break", "# if sampling with probabilities, re-caclc the # of items per sample", "# after the initial sample, this way the sample size reflects the probabilities", "if", "p", "is", "not", "None", "and", "i", "==", "1", ":", "per_sample", "=", "sample_rows", "[", "accounting_column", "]", ".", "sum", "(", ")", "/", "(", "1.0", "*", "len", "(", "sample_rows", ")", ")", "# update the sample", "num_samples", "=", "int", "(", "math", ".", "ceil", "(", "math", ".", "fabs", "(", "remaining", ")", "/", "per_sample", ")", ")", "if", "remaining", ">", "0", ":", "# we're short, add to the sample", "curr_ids", "=", "np", ".", "random", ".", "choice", "(", "data", ".", "index", ".", "values", ",", "num_samples", ",", "p", "=", "p", ")", "sample_rows", "=", "pd", ".", "concat", "(", "[", "sample_rows", ",", "data", ".", "loc", "[", "curr_ids", "]", "]", ")", "else", ":", "# we've overshot, remove from existing samples (FIFO)", "sample_rows", "=", "sample_rows", ".", "iloc", "[", "num_samples", ":", "]", ".", "copy", "(", ")", "# update the total and check for the closest result", "curr_total", "=", "sample_rows", "[", "accounting_column", "]", ".", "sum", "(", ")", "remaining", "=", "total", "-", "curr_total", "if", "abs", "(", "remaining", ")", "<", "closest_remain", ":", "closest_remain", "=", "abs", "(", "remaining", ")", "closest", "=", "sample_rows", "return", "closest", ",", "matched" ]
Sample rows with accounting with replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly.
[ "Sample", "rows", "with", "accounting", "with", "replacement", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L35-L105
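The per_sample heuristic above sizes each batch of draws; in isolation, with a hypothetical household table and a persons accounting column:

import numpy as np
import pandas as pd

hh = pd.DataFrame({'persons': [1, 2, 3, 4]}, index=[10, 11, 12, 13])
total = 10
per_sample = hh['persons'].sum() / (1.0 * len(hh))  # 2.5 persons per draw
num_samples = int(np.ceil(total / per_sample))      # 4 draws expected to cover 10
ids = np.random.choice(hh.index.values, num_samples)
print(hh.loc[ids]['persons'].sum())  # near 10; the loop above refines this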
UDST/urbansim
urbansim/utils/sampling.py
accounting_sample_no_replace
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None): """ Samples rows with accounting without replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly. """ # make sure this is even feasible if total > data[accounting_column].sum(): raise ValueError('Control total exceeds the available samples') # check for probabilities p = get_probs(data, prob_column) # shuffle the rows if p is None: # random shuffle shuff_idx = np.random.permutation(data.index.values) else: # weighted shuffle ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index) ran_p = ran_p.sort_values(ascending=False) shuff_idx = ran_p.index.values # get the initial sample shuffle = data.loc[shuff_idx] csum = np.cumsum(shuffle[accounting_column].values) pos = np.searchsorted(csum, total, 'right') sample = shuffle.iloc[:pos] # refine the sample sample_idx = sample.index.values sample_total = sample[accounting_column].sum() shortage = total - sample_total matched = False for idx, row in shuffle.iloc[pos:].iterrows(): if shortage == 0: # we've matched matched = True break # add the current element if it doesn't exceed the total cnt = row[accounting_column] if cnt <= shortage: sample_idx = np.append(sample_idx, idx) shortage -= cnt return shuffle.loc[sample_idx].copy(), matched
python
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None): if total > data[accounting_column].sum(): raise ValueError('Control total exceeds the available samples') p = get_probs(data, prob_column) if p is None: shuff_idx = np.random.permutation(data.index.values) else: ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index) ran_p = ran_p.sort_values(ascending=False) shuff_idx = ran_p.index.values shuffle = data.loc[shuff_idx] csum = np.cumsum(shuffle[accounting_column].values) pos = np.searchsorted(csum, total, 'right') sample = shuffle.iloc[:pos] sample_idx = sample.index.values sample_total = sample[accounting_column].sum() shortage = total - sample_total matched = False for idx, row in shuffle.iloc[pos:].iterrows(): if shortage == 0: matched = True break cnt = row[accounting_column] if cnt <= shortage: sample_idx = np.append(sample_idx, idx) shortage -= cnt return shuffle.loc[sample_idx].copy(), matched
[ "def", "accounting_sample_no_replace", "(", "total", ",", "data", ",", "accounting_column", ",", "prob_column", "=", "None", ")", ":", "# make sure this is even feasible", "if", "total", ">", "data", "[", "accounting_column", "]", ".", "sum", "(", ")", ":", "raise", "ValueError", "(", "'Control total exceeds the available samples'", ")", "# check for probabilities", "p", "=", "get_probs", "(", "data", ",", "prob_column", ")", "# shuffle the rows", "if", "p", "is", "None", ":", "# random shuffle", "shuff_idx", "=", "np", ".", "random", ".", "permutation", "(", "data", ".", "index", ".", "values", ")", "else", ":", "# weighted shuffle", "ran_p", "=", "pd", ".", "Series", "(", "np", ".", "power", "(", "np", ".", "random", ".", "rand", "(", "len", "(", "p", ")", ")", ",", "1.0", "/", "p", ")", ",", "index", "=", "data", ".", "index", ")", "ran_p", ".", "sort_values", "(", "ascending", "=", "False", ")", "shuff_idx", "=", "ran_p", ".", "index", ".", "values", "# get the initial sample", "shuffle", "=", "data", ".", "loc", "[", "shuff_idx", "]", "csum", "=", "np", ".", "cumsum", "(", "shuffle", "[", "accounting_column", "]", ".", "values", ")", "pos", "=", "np", ".", "searchsorted", "(", "csum", ",", "total", ",", "'right'", ")", "sample", "=", "shuffle", ".", "iloc", "[", ":", "pos", "]", "# refine the sample", "sample_idx", "=", "sample", ".", "index", ".", "values", "sample_total", "=", "sample", "[", "accounting_column", "]", ".", "sum", "(", ")", "shortage", "=", "total", "-", "sample_total", "matched", "=", "False", "for", "idx", ",", "row", "in", "shuffle", ".", "iloc", "[", "pos", ":", "]", ".", "iterrows", "(", ")", ":", "if", "shortage", "==", "0", ":", "# we've matached", "matched", "=", "True", "break", "# add the current element if it doesnt exceed the total", "cnt", "=", "row", "[", "accounting_column", "]", "if", "cnt", "<=", "shortage", ":", "sample_idx", "=", "np", ".", "append", "(", "sample_idx", ",", "idx", ")", "shortage", "-=", "cnt", "return", "shuffle", ".", "loc", "[", "sample_idx", "]", ".", "copy", "(", ")", ",", "matched" ]
Samples rows with accounting without replacement. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. accounting_column: string Name of column with accounting totals/quantities to apply towards the control. prob_column: string, optional, default None Name of the column in the data to provide probabilities or weights. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool Indicates if the total was matched exactly.
[ "Samples", "rows", "with", "accounting", "without", "replacement", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L108-L172
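The weighted shuffle uses the standard exponent trick (Efraimidis-Spirakis keys: u ** (1 / w) for u ~ Uniform(0, 1), sorted descending), shown on its own:

import numpy as np

w = np.array([1.0, 5.0, 1.0])          # hypothetical weights
keys = np.power(np.random.rand(len(w)), 1.0 / w)
order = np.argsort(-keys)              # descending; heavy weights tend to lead
print(order)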
UDST/urbansim
urbansim/utils/sampling.py
sample_rows
def sample_rows(total, data, replace=True, accounting_column=None, max_iterations=50, prob_column=None, return_status=False): """ Samples and returns rows from a data frame while matching a desired control total. The total may represent a simple row count or may attempt to match a sum/quantity from an accounting column. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. replace: bool, optional, default True Indicates if sampling with or without replacement. accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Only applicable when sampling with replacement. prob_column: string, optional, default None If provided, name of the column in the data frame to provide probabilities or weights. If not provided, the sampling is random. return_status: bool, optional, default False If True, will also return a bool indicating if the total was matched exactly. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool If return_status is True, returns True if total is matched exactly. """ if not data.index.is_unique: raise ValueError('Data must have a unique index') # simplest case, just return n random rows if accounting_column is None: if replace is False and total > len(data.index.values): raise ValueError('Control total exceeds the available samples') p = get_probs(data, prob_column) rows = data.loc[np.random.choice( data.index.values, int(total), replace=replace, p=p)].copy() matched = True # sample with accounting else: if replace: rows, matched = accounting_sample_replace( total, data, accounting_column, prob_column, max_iterations) else: rows, matched = accounting_sample_no_replace( total, data, accounting_column, prob_column) # return the results if return_status: return rows, matched else: return rows
python
def sample_rows(total, data, replace=True, accounting_column=None, max_iterations=50, prob_column=None, return_status=False): if not data.index.is_unique: raise ValueError('Data must have a unique index') if accounting_column is None: if replace is False and total > len(data.index.values): raise ValueError('Control total exceeds the available samples') p = get_probs(data, prob_column) rows = data.loc[np.random.choice( data.index.values, int(total), replace=replace, p=p)].copy() matched = True else: if replace: rows, matched = accounting_sample_replace( total, data, accounting_column, prob_column, max_iterations) else: rows, matched = accounting_sample_no_replace( total, data, accounting_column, prob_column) if return_status: return rows, matched else: return rows
[ "def", "sample_rows", "(", "total", ",", "data", ",", "replace", "=", "True", ",", "accounting_column", "=", "None", ",", "max_iterations", "=", "50", ",", "prob_column", "=", "None", ",", "return_status", "=", "False", ")", ":", "if", "not", "data", ".", "index", ".", "is_unique", ":", "raise", "ValueError", "(", "'Data must have a unique index'", ")", "# simplest case, just return n random rows", "if", "accounting_column", "is", "None", ":", "if", "replace", "is", "False", "and", "total", ">", "len", "(", "data", ".", "index", ".", "values", ")", ":", "raise", "ValueError", "(", "'Control total exceeds the available samples'", ")", "p", "=", "get_probs", "(", "prob_column", ")", "rows", "=", "data", ".", "loc", "[", "np", ".", "random", ".", "choice", "(", "data", ".", "index", ".", "values", ",", "int", "(", "total", ")", ",", "replace", "=", "replace", ",", "p", "=", "p", ")", "]", ".", "copy", "(", ")", "matched", "=", "True", "# sample with accounting", "else", ":", "if", "replace", ":", "rows", ",", "matched", "=", "accounting_sample_replace", "(", "total", ",", "data", ",", "accounting_column", ",", "prob_column", ",", "max_iterations", ")", "else", ":", "rows", ",", "matched", "=", "accounting_sample_no_replace", "(", "total", ",", "data", ",", "accounting_column", ",", "prob_column", ")", "# return the results", "if", "return_status", ":", "return", "rows", ",", "matched", "else", ":", "return", "rows" ]
Samples and returns rows from a data frame while matching a desired control total. The total may represent a simple row count or may attempt to match a sum/quantity from an accounting column. Parameters ---------- total : int The control total the sampled rows will attempt to match. data: pandas.DataFrame Table to sample from. replace: bool, optional, default True Indicates if sampling with or without replacement. accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. max_iterations: int, optional, default 50 When using an accounting attribute, the maximum number of sampling iterations that will be applied. Only applicable when sampling with replacement. prob_column: string, optional, default None If provided, name of the column in the data frame to provide probabilities or weights. If not provided, the sampling is random. return_status: bool, optional, default False If True, will also return a bool indicating if the total was matched exactly. Returns ------- sample_rows : pandas.DataFrame Table containing the sample. matched: bool If return_status is True, returns True if total is matched exactly.
[ "Samples", "and", "returns", "rows", "from", "a", "data", "frame", "while", "matching", "a", "desired", "control", "total", ".", "The", "total", "may", "represent", "a", "simple", "row", "count", "or", "may", "attempt", "to", "match", "a", "sum", "/", "quantity", "from", "an", "accounting", "column", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L175-L234
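Typical call shapes for sample_rows, as a hedged sketch (table and column names hypothetical; the function itself lives in urbansim.utils.sampling):

# rows = sample_rows(100, households)                                # 100 random rows
# rows = sample_rows(250, households, accounting_column='persons')   # ~250 persons
# rows, ok = sample_rows(250, households, replace=False,
#                        accounting_column='persons', return_status=True)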
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProFormaConfig._convert_types
def _convert_types(self): """ convert lists and dictionaries that are useful for users to np vectors that are usable by machines """ self.fars = np.array(self.fars) self.parking_rates = np.array([self.parking_rates[use] for use in self.uses]) self.res_ratios = {} assert len(self.uses) == len(self.residential_uses) for k, v in self.forms.items(): self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses]) # normalize if not already self.forms[k] /= self.forms[k].sum() self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum() self.costs = np.transpose(np.array([self.costs[use] for use in self.uses]))
python
def _convert_types(self): self.fars = np.array(self.fars) self.parking_rates = np.array([self.parking_rates[use] for use in self.uses]) self.res_ratios = {} assert len(self.uses) == len(self.residential_uses) for k, v in self.forms.items(): self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses]) self.forms[k] /= self.forms[k].sum() self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum() self.costs = np.transpose(np.array([self.costs[use] for use in self.uses]))
[ "def", "_convert_types", "(", "self", ")", ":", "self", ".", "fars", "=", "np", ".", "array", "(", "self", ".", "fars", ")", "self", ".", "parking_rates", "=", "np", ".", "array", "(", "[", "self", ".", "parking_rates", "[", "use", "]", "for", "use", "in", "self", ".", "uses", "]", ")", "self", ".", "res_ratios", "=", "{", "}", "assert", "len", "(", "self", ".", "uses", ")", "==", "len", "(", "self", ".", "residential_uses", ")", "for", "k", ",", "v", "in", "self", ".", "forms", ".", "items", "(", ")", ":", "self", ".", "forms", "[", "k", "]", "=", "np", ".", "array", "(", "[", "self", ".", "forms", "[", "k", "]", ".", "get", "(", "use", ",", "0.0", ")", "for", "use", "in", "self", ".", "uses", "]", ")", "# normalize if not already", "self", ".", "forms", "[", "k", "]", "/=", "self", ".", "forms", "[", "k", "]", ".", "sum", "(", ")", "self", ".", "res_ratios", "[", "k", "]", "=", "pd", ".", "Series", "(", "self", ".", "forms", "[", "k", "]", ")", "[", "self", ".", "residential_uses", "]", ".", "sum", "(", ")", "self", ".", "costs", "=", "np", ".", "transpose", "(", "np", ".", "array", "(", "[", "self", ".", "costs", "[", "use", "]", "for", "use", "in", "self", ".", "uses", "]", ")", ")" ]
convert lists and dictionaries that are useful for users to np vectors that are usable by machines
[ "convert", "lists", "and", "dictionaries", "that", "are", "useful", "for", "users", "to", "np", "vectors", "that", "are", "usable", "by", "machines" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L192-L207
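The form-normalization step of _convert_types in isolation: a use-mix dict becomes a probability vector ordered by the configured uses (toy values):

import numpy as np

uses = ['retail', 'residential']
form = {'residential': 3.0, 'retail': 1.0}
vec = np.array([form.get(use, 0.0) for use in uses])
vec /= vec.sum()  # normalize if not already
print(vec)  # [0.25 0.75]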
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma._building_cost
def _building_cost(self, use_mix, stories): """ Generate building cost for a set of buildings Parameters ---------- use_mix : array The mix of uses for this form stories : series A Pandas Series of stories Returns ------- array The cost per sqft for this unit mix and height. """ c = self.config # stories to heights heights = stories * c.height_per_story # cost index for this height costs = np.searchsorted(c.heights_for_costs, heights) # this will get set to nan later costs[np.isnan(heights)] = 0 # compute cost with matrix multiply costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix) # some heights aren't allowed - cost should be nan costs[np.isnan(stories).flatten()] = np.nan return costs.flatten()
python
def _building_cost(self, use_mix, stories): c = self.config heights = stories * c.height_per_story costs = np.searchsorted(c.heights_for_costs, heights) costs[np.isnan(heights)] = 0 costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix) costs[np.isnan(stories).flatten()] = np.nan return costs.flatten()
[ "def", "_building_cost", "(", "self", ",", "use_mix", ",", "stories", ")", ":", "c", "=", "self", ".", "config", "# stories to heights", "heights", "=", "stories", "*", "c", ".", "height_per_story", "# cost index for this height", "costs", "=", "np", ".", "searchsorted", "(", "c", ".", "heights_for_costs", ",", "heights", ")", "# this will get set to nan later", "costs", "[", "np", ".", "isnan", "(", "heights", ")", "]", "=", "0", "# compute cost with matrix multiply", "costs", "=", "np", ".", "dot", "(", "np", ".", "squeeze", "(", "c", ".", "costs", "[", "costs", ".", "astype", "(", "'int32'", ")", "]", ")", ",", "use_mix", ")", "# some heights aren't allowed - cost should be nan", "costs", "[", "np", ".", "isnan", "(", "stories", ")", ".", "flatten", "(", ")", "]", "=", "np", ".", "nan", "return", "costs", ".", "flatten", "(", ")" ]
Generate building cost for a set of buildings Parameters ---------- use_mix : array The mix of uses for this form stories : series A Pandas Series of stories Returns ------- array The cost per sqft for this unit mix and height.
[ "Generate", "building", "cost", "for", "a", "set", "of", "buildings" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L279-L307
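The height-to-cost-bin lookup in _building_cost leans on np.searchsorted; isolated here with made-up breakpoints (note the nan maps past the last edge, which is why the record zeroes those positions before indexing):

import numpy as np

heights_for_costs = np.array([15.0, 55.0, 120.0])  # hypothetical bin edges
heights = np.array([12.0, 60.0, np.nan])
idx = np.searchsorted(heights_for_costs, heights)
print(idx)  # [0 2 3]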
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma._generate_lookup
def _generate_lookup(self): """ Run the developer model on all possible inputs specified in the configuration object - not generally called by the user. This part computes the final cost per sqft of the building to construct and then turns it into the yearly rent necessary to make break even on that cost. """ c = self.config # get all the building forms we can use keys = c.forms.keys() keys = sorted(keys) df_d = {} for name in keys: # get the use distribution for each uses_distrib = c.forms[name] for parking_config in c.parking_configs: # going to make a dataframe to store values to make # pro forma results transparent df = pd.DataFrame(index=c.fars) df['far'] = c.fars df['pclsz'] = c.tiled_parcel_sizes building_bulk = np.reshape( c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1)) building_bulk = np.reshape(building_bulk, (-1, 1)) # need to converge in on exactly how much far is available for # deck pkg if parking_config == 'deck': building_bulk /= (1.0 + np.sum(uses_distrib * c.parking_rates) * c.parking_sqft_d[parking_config] / c.sqft_per_rate) df['building_sqft'] = building_bulk parkingstalls = building_bulk * \ np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate parking_cost = (c.parking_cost_d[parking_config] * parkingstalls * c.parking_sqft_d[parking_config]) df['spaces'] = parkingstalls if parking_config == 'underground': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = building_bulk / c.tiled_parcel_sizes if parking_config == 'deck': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = ((building_bulk + parkingstalls * c.parking_sqft_d[parking_config]) / c.tiled_parcel_sizes) if parking_config == 'surface': stories = building_bulk / \ (c.tiled_parcel_sizes - parkingstalls * c.parking_sqft_d[parking_config]) df['park_sqft'] = 0 # not all fars support surface parking stories[stories < 0.0] = np.nan # I think we can assume that stories over 5 # do not work with surface parking stories[stories > 5.0] = np.nan df['total_built_sqft'] = df.building_sqft + df.park_sqft df['parking_sqft_ratio'] = df.park_sqft / df.total_built_sqft stories /= c.parcel_coverage df['stories'] = np.ceil(stories) df['height'] = df.stories * c.height_per_story df['build_cost_sqft'] = self._building_cost(uses_distrib, stories) df['build_cost'] = df.build_cost_sqft * df.building_sqft df['park_cost'] = parking_cost df['cost'] = df.build_cost + df.park_cost df['ave_cost_sqft'] = (df.cost / df.total_built_sqft) * c.profit_factor if name == 'retail': df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan if name == 'industrial': df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan df_d[(name, parking_config)] = df self.dev_d = df_d
python
def _generate_lookup(self): c = self.config keys = c.forms.keys() keys = sorted(keys) df_d = {} for name in keys: uses_distrib = c.forms[name] for parking_config in c.parking_configs: df = pd.DataFrame(index=c.fars) df['far'] = c.fars df['pclsz'] = c.tiled_parcel_sizes building_bulk = np.reshape( c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1)) building_bulk = np.reshape(building_bulk, (-1, 1)) if parking_config == 'deck': building_bulk /= (1.0 + np.sum(uses_distrib * c.parking_rates) * c.parking_sqft_d[parking_config] / c.sqft_per_rate) df['building_sqft'] = building_bulk parkingstalls = building_bulk * \ np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate parking_cost = (c.parking_cost_d[parking_config] * parkingstalls * c.parking_sqft_d[parking_config]) df['spaces'] = parkingstalls if parking_config == 'underground': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = building_bulk / c.tiled_parcel_sizes if parking_config == 'deck': df['park_sqft'] = parkingstalls * \ c.parking_sqft_d[parking_config] stories = ((building_bulk + parkingstalls * c.parking_sqft_d[parking_config]) / c.tiled_parcel_sizes) if parking_config == 'surface': stories = building_bulk / \ (c.tiled_parcel_sizes - parkingstalls * c.parking_sqft_d[parking_config]) df['park_sqft'] = 0 stories[stories < 0.0] = np.nan stories[stories > 5.0] = np.nan df['total_built_sqft'] = df.building_sqft + df.park_sqft df['parking_sqft_ratio'] = df.park_sqft / df.total_built_sqft stories /= c.parcel_coverage df['stories'] = np.ceil(stories) df['height'] = df.stories * c.height_per_story df['build_cost_sqft'] = self._building_cost(uses_distrib, stories) df['build_cost'] = df.build_cost_sqft * df.building_sqft df['park_cost'] = parking_cost df['cost'] = df.build_cost + df.park_cost df['ave_cost_sqft'] = (df.cost / df.total_built_sqft) * c.profit_factor if name == 'retail': df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan if name == 'industrial': df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan df_d[(name, parking_config)] = df self.dev_d = df_d
[ "def", "_generate_lookup", "(", "self", ")", ":", "c", "=", "self", ".", "config", "# get all the building forms we can use", "keys", "=", "c", ".", "forms", ".", "keys", "(", ")", "keys", "=", "sorted", "(", "keys", ")", "df_d", "=", "{", "}", "for", "name", "in", "keys", ":", "# get the use distribution for each", "uses_distrib", "=", "c", ".", "forms", "[", "name", "]", "for", "parking_config", "in", "c", ".", "parking_configs", ":", "# going to make a dataframe to store values to make", "# pro forma results transparent", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "c", ".", "fars", ")", "df", "[", "'far'", "]", "=", "c", ".", "fars", "df", "[", "'pclsz'", "]", "=", "c", ".", "tiled_parcel_sizes", "building_bulk", "=", "np", ".", "reshape", "(", "c", ".", "parcel_sizes", ",", "(", "-", "1", ",", "1", ")", ")", "*", "np", ".", "reshape", "(", "c", ".", "fars", ",", "(", "1", ",", "-", "1", ")", ")", "building_bulk", "=", "np", ".", "reshape", "(", "building_bulk", ",", "(", "-", "1", ",", "1", ")", ")", "# need to converge in on exactly how much far is available for", "# deck pkg", "if", "parking_config", "==", "'deck'", ":", "building_bulk", "/=", "(", "1.0", "+", "np", ".", "sum", "(", "uses_distrib", "*", "c", ".", "parking_rates", ")", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", "/", "c", ".", "sqft_per_rate", ")", "df", "[", "'building_sqft'", "]", "=", "building_bulk", "parkingstalls", "=", "building_bulk", "*", "np", ".", "sum", "(", "uses_distrib", "*", "c", ".", "parking_rates", ")", "/", "c", ".", "sqft_per_rate", "parking_cost", "=", "(", "c", ".", "parking_cost_d", "[", "parking_config", "]", "*", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", ")", "df", "[", "'spaces'", "]", "=", "parkingstalls", "if", "parking_config", "==", "'underground'", ":", "df", "[", "'park_sqft'", "]", "=", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", "stories", "=", "building_bulk", "/", "c", ".", "tiled_parcel_sizes", "if", "parking_config", "==", "'deck'", ":", "df", "[", "'park_sqft'", "]", "=", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", "stories", "=", "(", "(", "building_bulk", "+", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", ")", "/", "c", ".", "tiled_parcel_sizes", ")", "if", "parking_config", "==", "'surface'", ":", "stories", "=", "building_bulk", "/", "(", "c", ".", "tiled_parcel_sizes", "-", "parkingstalls", "*", "c", ".", "parking_sqft_d", "[", "parking_config", "]", ")", "df", "[", "'park_sqft'", "]", "=", "0", "# not all fars support surface parking", "stories", "[", "stories", "<", "0.0", "]", "=", "np", ".", "nan", "# I think we can assume that stories over 3", "# do not work with surface parking", "stories", "[", "stories", ">", "5.0", "]", "=", "np", ".", "nan", "df", "[", "'total_built_sqft'", "]", "=", "df", ".", "building_sqft", "+", "df", ".", "park_sqft", "df", "[", "'parking_sqft_ratio'", "]", "=", "df", ".", "park_sqft", "/", "df", ".", "total_built_sqft", "stories", "/=", "c", ".", "parcel_coverage", "df", "[", "'stories'", "]", "=", "np", ".", "ceil", "(", "stories", ")", "df", "[", "'height'", "]", "=", "df", ".", "stories", "*", "c", ".", "height_per_story", "df", "[", "'build_cost_sqft'", "]", "=", "self", ".", "_building_cost", "(", "uses_distrib", ",", "stories", ")", "df", "[", "'build_cost'", "]", "=", "df", ".", "build_cost_sqft", "*", "df", ".", "building_sqft", "df", "[", "'park_cost'", "]", "=", 
"parking_cost", "df", "[", "'cost'", "]", "=", "df", ".", "build_cost", "+", "df", ".", "park_cost", "df", "[", "'ave_cost_sqft'", "]", "=", "(", "df", ".", "cost", "/", "df", ".", "total_built_sqft", ")", "*", "c", ".", "profit_factor", "if", "name", "==", "'retail'", ":", "df", "[", "'ave_cost_sqft'", "]", "[", "c", ".", "fars", ">", "c", ".", "max_retail_height", "]", "=", "np", ".", "nan", "if", "name", "==", "'industrial'", ":", "df", "[", "'ave_cost_sqft'", "]", "[", "c", ".", "fars", ">", "c", ".", "max_industrial_height", "]", "=", "np", ".", "nan", "df_d", "[", "(", "name", ",", "parking_config", ")", "]", "=", "df", "self", ".", "dev_d", "=", "df_d" ]
Run the developer model on all possible inputs specified in the configuration object - not generally called by the user. This part computes the final cost per sqft of the building to construct and then turns it into the yearly rent necessary to make break even on that cost.
[ "Run", "the", "developer", "model", "on", "all", "possible", "inputs", "specified", "in", "the", "configuration", "object", "-", "not", "generally", "called", "by", "the", "user", ".", "This", "part", "computes", "the", "final", "cost", "per", "sqft", "of", "the", "building", "to", "construct", "and", "then", "turns", "it", "into", "the", "yearly", "rent", "necessary", "to", "make", "break", "even", "on", "that", "cost", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L309-L398
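One row of the lookup traced by hand: bulk from parcel size and FAR, stories under a coverage limit, then height (all numbers hypothetical):

import numpy as np

parcel_size, far = 10000.0, 2.0
parcel_coverage, height_per_story = 0.8, 12.0
building_bulk = parcel_size * far           # 20000 sqft of built space
stories = building_bulk / parcel_size       # 2.0 if the whole parcel were covered
stories /= parcel_coverage                  # 2.5 once only 80% can be covered
print(np.ceil(stories) * height_per_story)  # 36.0 ft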
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma.lookup
def lookup(self, form, df, only_built=True, pass_through=None): """ This function does the developer model lookups for all the actual input data. Parameters ---------- form : string One of the forms specified in the configuration file df: dataframe Pass in a single data frame which is indexed by parcel_id and has the following columns only_built : bool Whether to return only those buildings that are profitable and allowed by zoning, or whether to return as much information as possible, even if unlikely to be built (can be used when development might be subsidized or when debugging) pass_through : list of strings List of field names to take from the input parcel frame and pass to the output feasibility frame - is usually used for debugging purposes - these fields will be passed all the way through developer Input Dataframe Columns rent : dataframe A set of columns, one for each of the uses passed in the configuration. Values are yearly rents for that use. Typical column names would be "residential", "retail", "industrial" and "office" land_cost : series A series representing the CURRENT yearly rent for each parcel. Used to compute acquisition costs for the parcel. parcel_size : series A series representing the parcel size for each parcel. max_far : series A series representing the maximum far allowed by zoning. Buildings will not be built above these fars. max_height : series A series representing the maximum height allowed by zoning. Buildings will not be built above these heights. Will pick between the min of the far and height, will ignore one of them if one is nan, but will not build if both are nan. max_dua : series, optional A series representing the maximum dwelling units per acre allowed by zoning. If max_dua is passed, the average unit size should be passed below to translate from dua to floor space. ave_unit_size : series, optional This is required if max_dua is passed above, otherwise it is optional. This is the same as the parameter to Developer.pick() (it should be the same series). Returns ------- index : Series, int parcel identifiers building_sqft : Series, float The number of square feet for the building to build. Keep in mind this includes parking and common space. Will need a helpful function to convert from gross square feet to actual usable square feet in residential units. building_cost : Series, float The cost of constructing the building as given by the ave_cost_per_sqft from the cost model (for this FAR) and the number of square feet. total_cost : Series, float The cost of constructing the building plus the cost of acquisition of the current parcel/building. building_revenue : Series, float The NPV of the revenue for the building to be built, which is the number of square feet times the yearly rent divided by the cap rate (with a few adjustment factors including building efficiency). max_profit_far : Series, float The FAR of the maximum profit building (constrained by the max_far and max_height from the input dataframe). max_profit : The profit for the maximum profit building (constrained by the max_far and max_height from the input dataframe). """ df = pd.concat(self._lookup_parking_cfg(form, parking_config, df, only_built, pass_through) for parking_config in self.config.parking_configs) if len(df) == 0: return pd.DataFrame() max_profit_ind = df.pivot( columns="parking_config", values="max_profit").idxmax(axis=1).to_frame("parking_config") df.set_index(["parking_config"], append=True, inplace=True) max_profit_ind.set_index(["parking_config"], append=True, inplace=True) # get the max_profit idx return df.loc[max_profit_ind.index].reset_index(1)
python
def lookup(self, form, df, only_built=True, pass_through=None): df = pd.concat(self._lookup_parking_cfg(form, parking_config, df, only_built, pass_through) for parking_config in self.config.parking_configs) if len(df) == 0: return pd.DataFrame() max_profit_ind = df.pivot( columns="parking_config", values="max_profit").idxmax(axis=1).to_frame("parking_config") df.set_index(["parking_config"], append=True, inplace=True) max_profit_ind.set_index(["parking_config"], append=True, inplace=True) return df.loc[max_profit_ind.index].reset_index(1)
[ "def", "lookup", "(", "self", ",", "form", ",", "df", ",", "only_built", "=", "True", ",", "pass_through", "=", "None", ")", ":", "df", "=", "pd", ".", "concat", "(", "self", ".", "_lookup_parking_cfg", "(", "form", ",", "parking_config", ",", "df", ",", "only_built", ",", "pass_through", ")", "for", "parking_config", "in", "self", ".", "config", ".", "parking_configs", ")", "if", "len", "(", "df", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", ")", "max_profit_ind", "=", "df", ".", "pivot", "(", "columns", "=", "\"parking_config\"", ",", "values", "=", "\"max_profit\"", ")", ".", "idxmax", "(", "axis", "=", "1", ")", ".", "to_frame", "(", "\"parking_config\"", ")", "df", ".", "set_index", "(", "[", "\"parking_config\"", "]", ",", "append", "=", "True", ",", "inplace", "=", "True", ")", "max_profit_ind", ".", "set_index", "(", "[", "\"parking_config\"", "]", ",", "append", "=", "True", ",", "inplace", "=", "True", ")", "# get the max_profit idx", "return", "df", ".", "loc", "[", "max_profit_ind", ".", "index", "]", ".", "reset_index", "(", "1", ")" ]
This function does the developer model lookups for all the actual input data. Parameters ---------- form : string One of the forms specified in the configuration file df: dataframe Pass in a single data frame which is indexed by parcel_id and has the following columns only_built : bool Whether to return only those buildings that are profitable and allowed by zoning, or whether to return as much information as possible, even if unlikely to be built (can be used when development might be subsidized or when debugging) pass_through : list of strings List of field names to take from the input parcel frame and pass to the output feasibility frame - is usually used for debugging purposes - these fields will be passed all the way through developer Input Dataframe Columns rent : dataframe A set of columns, one for each of the uses passed in the configuration. Values are yearly rents for that use. Typical column names would be "residential", "retail", "industrial" and "office" land_cost : series A series representing the CURRENT yearly rent for each parcel. Used to compute acquisition costs for the parcel. parcel_size : series A series representing the parcel size for each parcel. max_far : series A series representing the maximum far allowed by zoning. Buildings will not be built above these fars. max_height : series A series representing the maximum height allowed by zoning. Buildings will not be built above these heights. Will pick between the min of the far and height, will ignore one of them if one is nan, but will not build if both are nan. max_dua : series, optional A series representing the maximum dwelling units per acre allowed by zoning. If max_dua is passed, the average unit size should be passed below to translate from dua to floor space. ave_unit_size : series, optional This is required if max_dua is passed above, otherwise it is optional. This is the same as the parameter to Developer.pick() (it should be the same series). Returns ------- index : Series, int parcel identifiers building_sqft : Series, float The number of square feet for the building to build. Keep in mind this includes parking and common space. Will need a helpful function to convert from gross square feet to actual usable square feet in residential units. building_cost : Series, float The cost of constructing the building as given by the ave_cost_per_sqft from the cost model (for this FAR) and the number of square feet. total_cost : Series, float The cost of constructing the building plus the cost of acquisition of the current parcel/building. building_revenue : Series, float The NPV of the revenue for the building to be built, which is the number of square feet times the yearly rent divided by the cap rate (with a few adjustment factors including building efficiency). max_profit_far : Series, float The FAR of the maximum profit building (constrained by the max_far and max_height from the input dataframe). max_profit : The profit for the maximum profit building (constrained by the max_far and max_height from the input dataframe).
[ "This", "function", "does", "the", "developer", "model", "lookups", "for", "all", "the", "actual", "input", "data", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L445-L537
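The max-profit selection across parking configurations reduces to the pivot/idxmax pattern used above; a toy frame makes the mechanics visible:

import pandas as pd

df = pd.DataFrame({'parking_config': ['surface', 'deck', 'surface', 'deck'],
                   'max_profit': [10.0, 12.0, 7.0, 3.0]},
                  index=[1, 1, 2, 2])  # index plays the role of parcel_id
best = df.pivot(columns='parking_config', values='max_profit').idxmax(axis=1)
print(best.to_dict())  # {1: 'deck', 2: 'surface'}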
UDST/urbansim
urbansim/developer/sqftproforma.py
SqFtProForma._debug_output
def _debug_output(self): """ this code creates the debugging plots to understand the behavior of the hypothetical building model """ import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt c = self.config df_d = self.dev_d keys = df_d.keys() keys = sorted(keys) for key in keys: logger.debug("\n" + str(key) + "\n") logger.debug(df_d[key]) for form in self.config.forms: logger.debug("\n" + str(form) + "\n") logger.debug(self.get_ave_cost_sqft(form, "surface")) keys = c.forms.keys() keys = sorted(keys) cnt = 1 share = None fig = plt.figure(figsize=(12, 3 * len(keys))) fig.suptitle('Profitable rents by use', fontsize=40) for name in keys: sumdf = None for parking_config in c.parking_configs: df = df_d[(name, parking_config)] if sumdf is None: sumdf = pd.DataFrame(df['far']) sumdf[parking_config] = df['ave_cost_sqft'] far = sumdf['far'] del sumdf['far'] if share is None: share = plt.subplot(len(keys) // 2, 2, cnt) else: plt.subplot(len(keys) // 2, 2, cnt, sharex=share, sharey=share) handles = plt.plot(far, sumdf) plt.ylabel('even_rent') plt.xlabel('FAR') plt.title('Rents for use type %s' % name) plt.legend( handles, c.parking_configs, loc='lower right', title='Parking type') cnt += 1 plt.savefig('even_rents.png', bbox_inches=0)
python
def _debug_output(self): import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt c = self.config df_d = self.dev_d keys = df_d.keys() keys = sorted(keys) for key in keys: logger.debug("\n" + str(key) + "\n") logger.debug(df_d[key]) for form in self.config.forms: logger.debug("\n" + str(form) + "\n") logger.debug(self.get_ave_cost_sqft(form, "surface")) keys = c.forms.keys() keys = sorted(keys) cnt = 1 share = None fig = plt.figure(figsize=(12, 3 * len(keys))) fig.suptitle('Profitable rents by use', fontsize=40) for name in keys: sumdf = None for parking_config in c.parking_configs: df = df_d[(name, parking_config)] if sumdf is None: sumdf = pd.DataFrame(df['far']) sumdf[parking_config] = df['ave_cost_sqft'] far = sumdf['far'] del sumdf['far'] if share is None: share = plt.subplot(len(keys) // 2, 2, cnt) else: plt.subplot(len(keys) // 2, 2, cnt, sharex=share, sharey=share) handles = plt.plot(far, sumdf) plt.ylabel('even_rent') plt.xlabel('FAR') plt.title('Rents for use type %s' % name) plt.legend( handles, c.parking_configs, loc='lower right', title='Parking type') cnt += 1 plt.savefig('even_rents.png', bbox_inches=0)
[ "def", "_debug_output", "(", "self", ")", ":", "import", "matplotlib", "matplotlib", ".", "use", "(", "'Agg'", ")", "import", "matplotlib", ".", "pyplot", "as", "plt", "c", "=", "self", ".", "config", "df_d", "=", "self", ".", "dev_d", "keys", "=", "df_d", ".", "keys", "(", ")", "keys", "=", "sorted", "(", "keys", ")", "for", "key", "in", "keys", ":", "logger", ".", "debug", "(", "\"\\n\"", "+", "str", "(", "key", ")", "+", "\"\\n\"", ")", "logger", ".", "debug", "(", "df_d", "[", "key", "]", ")", "for", "form", "in", "self", ".", "config", ".", "forms", ":", "logger", ".", "debug", "(", "\"\\n\"", "+", "str", "(", "key", ")", "+", "\"\\n\"", ")", "logger", ".", "debug", "(", "self", ".", "get_ave_cost_sqft", "(", "form", ",", "\"surface\"", ")", ")", "keys", "=", "c", ".", "forms", ".", "keys", "(", ")", "keys", "=", "sorted", "(", "keys", ")", "cnt", "=", "1", "share", "=", "None", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "12", ",", "3", "*", "len", "(", "keys", ")", ")", ")", "fig", ".", "suptitle", "(", "'Profitable rents by use'", ",", "fontsize", "=", "40", ")", "for", "name", "in", "keys", ":", "sumdf", "=", "None", "for", "parking_config", "in", "c", ".", "parking_configs", ":", "df", "=", "df_d", "[", "(", "name", ",", "parking_config", ")", "]", "if", "sumdf", "is", "None", ":", "sumdf", "=", "pd", ".", "DataFrame", "(", "df", "[", "'far'", "]", ")", "sumdf", "[", "parking_config", "]", "=", "df", "[", "'ave_cost_sqft'", "]", "far", "=", "sumdf", "[", "'far'", "]", "del", "sumdf", "[", "'far'", "]", "if", "share", "is", "None", ":", "share", "=", "plt", ".", "subplot", "(", "len", "(", "keys", ")", "/", "2", ",", "2", ",", "cnt", ")", "else", ":", "plt", ".", "subplot", "(", "len", "(", "keys", ")", "/", "2", ",", "2", ",", "cnt", ",", "sharex", "=", "share", ",", "sharey", "=", "share", ")", "handles", "=", "plt", ".", "plot", "(", "far", ",", "sumdf", ")", "plt", ".", "ylabel", "(", "'even_rent'", ")", "plt", ".", "xlabel", "(", "'FAR'", ")", "plt", ".", "title", "(", "'Rents for use type %s'", "%", "name", ")", "plt", ".", "legend", "(", "handles", ",", "c", ".", "parking_configs", ",", "loc", "=", "'lower right'", ",", "title", "=", "'Parking type'", ")", "cnt", "+=", "1", "plt", ".", "savefig", "(", "'even_rents.png'", ",", "bbox_inches", "=", "0", ")" ]
This code creates the debugging plots to understand the behavior of the hypothetical building model
[ "this", "code", "creates", "the", "debugging", "plots", "to", "understand", "the", "behavior", "of", "the", "hypothetical", "building", "model" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L666-L716
UDST/urbansim
urbansim/models/transition.py
add_rows
def add_rows(data, nrows, starting_index=None, accounting_column=None): """ Add rows to data table according to a given nrows. New rows will have their IDs set to NaN. Parameters ---------- data : pandas.DataFrame nrows : int Number of rows to add. starting_index : int, optional The starting index from which to calculate indexes for the new rows. If not given the max + 1 of the index of `data` will be used. accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with rows added. New rows will have their index values set to NaN. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. """ logger.debug('start: adding {} rows in transition model'.format(nrows)) if nrows == 0: return data, _empty_index(), _empty_index() if not starting_index: starting_index = data.index.values.max() + 1 new_rows = sample_rows(nrows, data, accounting_column=accounting_column) copied_index = new_rows.index added_index = pd.Index(np.arange( starting_index, starting_index + len(new_rows.index), dtype=np.int)) new_rows.index = added_index logger.debug( 'finish: added {} rows in transition model'.format(len(new_rows))) return pd.concat([data, new_rows]), added_index, copied_index
python
def add_rows(data, nrows, starting_index=None, accounting_column=None): logger.debug('start: adding {} rows in transition model'.format(nrows)) if nrows == 0: return data, _empty_index(), _empty_index() if not starting_index: starting_index = data.index.values.max() + 1 new_rows = sample_rows(nrows, data, accounting_column=accounting_column) copied_index = new_rows.index added_index = pd.Index(np.arange( starting_index, starting_index + len(new_rows.index), dtype=np.int)) new_rows.index = added_index logger.debug( 'finish: added {} rows in transition model'.format(len(new_rows))) return pd.concat([data, new_rows]), added_index, copied_index
[ "def", "add_rows", "(", "data", ",", "nrows", ",", "starting_index", "=", "None", ",", "accounting_column", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: adding {} rows in transition model'", ".", "format", "(", "nrows", ")", ")", "if", "nrows", "==", "0", ":", "return", "data", ",", "_empty_index", "(", ")", ",", "_empty_index", "(", ")", "if", "not", "starting_index", ":", "starting_index", "=", "data", ".", "index", ".", "values", ".", "max", "(", ")", "+", "1", "new_rows", "=", "sample_rows", "(", "nrows", ",", "data", ",", "accounting_column", "=", "accounting_column", ")", "copied_index", "=", "new_rows", ".", "index", "added_index", "=", "pd", ".", "Index", "(", "np", ".", "arange", "(", "starting_index", ",", "starting_index", "+", "len", "(", "new_rows", ".", "index", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "new_rows", ".", "index", "=", "added_index", "logger", ".", "debug", "(", "'finish: added {} rows in transition model'", ".", "format", "(", "len", "(", "new_rows", ")", ")", ")", "return", "pd", ".", "concat", "(", "[", "data", ",", "new_rows", "]", ")", ",", "added_index", ",", "copied_index" ]
Add rows to data table according to a given nrows. New rows will have their IDs set to NaN. Parameters ---------- data : pandas.DataFrame nrows : int Number of rows to add. starting_index : int, optional The starting index from which to calculate indexes for the new rows. If not given the max + 1 of the index of `data` will be used. accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with rows added. New rows will have their index values set to NaN. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries.
[ "Add", "rows", "to", "data", "table", "according", "to", "a", "given", "nrows", ".", "New", "rows", "will", "have", "their", "IDs", "set", "to", "NaN", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L24-L68
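A minimal usage sketch for add_rows with a hypothetical table; it assumes urbansim and era-appropriate pandas/NumPy are importable (the function itself relies on the deprecated np.int alias):

import pandas as pd
from urbansim.models.transition import add_rows

households = pd.DataFrame({'persons': [1, 2, 3]}, index=[10, 11, 12])
updated, added, copied = add_rows(households, nrows=2)
# starting_index defaults to max(index) + 1, so added == Index([13, 14]);
# copied holds the indexes of the randomly sampled source rows.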
UDST/urbansim
urbansim/models/transition.py
remove_rows
def remove_rows(data, nrows, accounting_column=None): """ Remove a random `nrows` number of rows from a table. Parameters ---------- data : DataFrame nrows : float Number of rows to remove. accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with random rows removed. removed : pandas.Index Indexes of the rows removed from the table. """ logger.debug('start: removing {} rows in transition model'.format(nrows)) nrows = abs(nrows) # in case a negative number came in unit_check = data[accounting_column].sum() if accounting_column else len(data) if nrows == 0: return data, _empty_index() elif nrows > unit_check: raise ValueError('Number of rows to remove exceeds number of records in table.') remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False) remove_index = remove_rows.index logger.debug('finish: removed {} rows in transition model'.format(nrows)) return data.loc[data.index.difference(remove_index)], remove_index
python
def remove_rows(data, nrows, accounting_column=None): logger.debug('start: removing {} rows in transition model'.format(nrows)) nrows = abs(nrows) unit_check = data[accounting_column].sum() if accounting_column else len(data) if nrows == 0: return data, _empty_index() elif nrows > unit_check: raise ValueError('Number of rows to remove exceeds number of records in table.') remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False) remove_index = remove_rows.index logger.debug('finish: removed {} rows in transition model'.format(nrows)) return data.loc[data.index.difference(remove_index)], remove_index
[ "def", "remove_rows", "(", "data", ",", "nrows", ",", "accounting_column", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: removing {} rows in transition model'", ".", "format", "(", "nrows", ")", ")", "nrows", "=", "abs", "(", "nrows", ")", "# in case a negative number came in", "unit_check", "=", "data", "[", "accounting_column", "]", ".", "sum", "(", ")", "if", "accounting_column", "else", "len", "(", "data", ")", "if", "nrows", "==", "0", ":", "return", "data", ",", "_empty_index", "(", ")", "elif", "nrows", ">", "unit_check", ":", "raise", "ValueError", "(", "'Number of rows to remove exceeds number of records in table.'", ")", "remove_rows", "=", "sample_rows", "(", "nrows", ",", "data", ",", "accounting_column", "=", "accounting_column", ",", "replace", "=", "False", ")", "remove_index", "=", "remove_rows", ".", "index", "logger", ".", "debug", "(", "'finish: removed {} rows in transition model'", ".", "format", "(", "nrows", ")", ")", "return", "data", ".", "loc", "[", "data", ".", "index", ".", "difference", "(", "remove_index", ")", "]", ",", "remove_index" ]
Remove a random `nrows` number of rows from a table. Parameters ---------- data : DataFrame nrows : float Number of rows to remove. accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with random rows removed. removed : pandas.Index Indexes of the rows removed from the table.
[ "Remove", "a", "random", "nrows", "number", "of", "rows", "from", "a", "table", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L71-L104
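A companion sketch for remove_rows (same assumptions, hypothetical data):

import pandas as pd
from urbansim.models.transition import remove_rows

df = pd.DataFrame({'units': [5, 10, 20]}, index=[1, 2, 3])
updated, removed = remove_rows(df, nrows=1)
# one randomly sampled row is dropped and its index returned in `removed`;
# asking for more rows than the table holds raises ValueError.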
UDST/urbansim
urbansim/models/transition.py
add_or_remove_rows
def add_or_remove_rows(data, nrows, starting_index=None, accounting_column=None): """ Add or remove rows to/from a table. Rows are added for positive `nrows` and removed for negative `nrows`. Parameters ---------- data : DataFrame nrows : float Number of rows to add or remove. starting_index : int, optional The starting index from which to calculate indexes for new rows. If not given the max + 1 of the index of `data` will be used. (Not applicable if rows are being removed.) accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with rows added or removed. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed. """ if nrows > 0: updated, added, copied = add_rows( data, nrows, starting_index, accounting_column=accounting_column) removed = _empty_index() elif nrows < 0: updated, removed = remove_rows(data, nrows, accounting_column=accounting_column) added, copied = _empty_index(), _empty_index() else: updated, added, copied, removed = \ data, _empty_index(), _empty_index(), _empty_index() return updated, added, copied, removed
python
def add_or_remove_rows(data, nrows, starting_index=None, accounting_column=None): if nrows > 0: updated, added, copied = add_rows( data, nrows, starting_index, accounting_column=accounting_column) removed = _empty_index() elif nrows < 0: updated, removed = remove_rows(data, nrows, accounting_column=accounting_column) added, copied = _empty_index(), _empty_index() else: updated, added, copied, removed = \ data, _empty_index(), _empty_index(), _empty_index() return updated, added, copied, removed
[ "def", "add_or_remove_rows", "(", "data", ",", "nrows", ",", "starting_index", "=", "None", ",", "accounting_column", "=", "None", ")", ":", "if", "nrows", ">", "0", ":", "updated", ",", "added", ",", "copied", "=", "add_rows", "(", "data", ",", "nrows", ",", "starting_index", ",", "accounting_column", "=", "accounting_column", ")", "removed", "=", "_empty_index", "(", ")", "elif", "nrows", "<", "0", ":", "updated", ",", "removed", "=", "remove_rows", "(", "data", ",", "nrows", ",", "accounting_column", "=", "accounting_column", ")", "added", ",", "copied", "=", "_empty_index", "(", ")", ",", "_empty_index", "(", ")", "else", ":", "updated", ",", "added", ",", "copied", ",", "removed", "=", "data", ",", "_empty_index", "(", ")", ",", "_empty_index", "(", ")", ",", "_empty_index", "(", ")", "return", "updated", ",", "added", ",", "copied", ",", "removed" ]
Add or remove rows to/from a table. Rows are added for positive `nrows` and removed for negative `nrows`. Parameters ---------- data : DataFrame nrows : float Number of rows to add or remove. starting_index : int, optional The starting index from which to calculate indexes for new rows. If not given the max + 1 of the index of `data` will be used. (Not applicable if rows are being removed.) accounting_column: string, optional Name of column with accounting totals/quantities to apply towards the control. If not provided then row counts will be used for accounting. Returns ------- updated : pandas.DataFrame Table with rows added or removed. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed.
[ "Add", "or", "remove", "rows", "to", "/", "from", "a", "table", ".", "Rows", "are", "added", "for", "positive", "nrows", "and", "removed", "for", "negative", "nrows", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L107-L149
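A small sketch of the sign-based dispatch in add_or_remove_rows (hypothetical data; assumes urbansim is importable):

import pandas as pd
from urbansim.models.transition import add_or_remove_rows

df = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
grown, added, copied, removed = add_or_remove_rows(df, 2)    # delegates to add_rows
shrunk, added, copied, removed = add_or_remove_rows(df, -1)  # delegates to remove_rows
# nrows == 0 returns the table unchanged along with three empty indexes.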
UDST/urbansim
urbansim/models/transition.py
_update_linked_table
def _update_linked_table(table, col_name, added, copied, removed): """ Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. Returns ------- updated : pandas.DataFrame """ logger.debug('start: update linked table after transition') # handle removals table = table.loc[~table[col_name].isin(set(removed))] if (added is None or len(added) == 0): return table # map new IDs to the IDs from which they were copied id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1) # join to linked table and assign new id new_rows = id_map.merge(table, on=col_name) new_rows.drop(col_name, axis=1, inplace=True) new_rows.rename(columns={'temp_id': col_name}, inplace=True) # index the new rows starting_index = table.index.values.max() + 1 new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int) logger.debug('finish: update linked table after transition') return pd.concat([table, new_rows])
python
def _update_linked_table(table, col_name, added, copied, removed): logger.debug('start: update linked table after transition') table = table.loc[~table[col_name].isin(set(removed))] if (added is None or len(added) == 0): return table id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1) new_rows = id_map.merge(table, on=col_name) new_rows.drop(col_name, axis=1, inplace=True) new_rows.rename(columns={'temp_id': col_name}, inplace=True) starting_index = table.index.values.max() + 1 new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int) logger.debug('finish: update linked table after transition') return pd.concat([table, new_rows])
[ "def", "_update_linked_table", "(", "table", ",", "col_name", ",", "added", ",", "copied", ",", "removed", ")", ":", "logger", ".", "debug", "(", "'start: update linked table after transition'", ")", "# handle removals", "table", "=", "table", ".", "loc", "[", "~", "table", "[", "col_name", "]", ".", "isin", "(", "set", "(", "removed", ")", ")", "]", "if", "(", "added", "is", "None", "or", "len", "(", "added", ")", "==", "0", ")", ":", "return", "table", "# map new IDs to the IDs from which they were copied", "id_map", "=", "pd", ".", "concat", "(", "[", "pd", ".", "Series", "(", "copied", ",", "name", "=", "col_name", ")", ",", "pd", ".", "Series", "(", "added", ",", "name", "=", "'temp_id'", ")", "]", ",", "axis", "=", "1", ")", "# join to linked table and assign new id", "new_rows", "=", "id_map", ".", "merge", "(", "table", ",", "on", "=", "col_name", ")", "new_rows", ".", "drop", "(", "col_name", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "new_rows", ".", "rename", "(", "columns", "=", "{", "'temp_id'", ":", "col_name", "}", ",", "inplace", "=", "True", ")", "# index the new rows", "starting_index", "=", "table", ".", "index", ".", "values", ".", "max", "(", ")", "+", "1", "new_rows", ".", "index", "=", "np", ".", "arange", "(", "starting_index", ",", "starting_index", "+", "len", "(", "new_rows", ")", ",", "dtype", "=", "np", ".", "int", ")", "logger", ".", "debug", "(", "'finish: update linked table after transition'", ")", "return", "pd", ".", "concat", "(", "[", "table", ",", "new_rows", "]", ")" ]
Copy and update rows in a table that has a column referencing another table that has had rows added via copying. Parameters ---------- table : pandas.DataFrame Table to update with new or removed rows. col_name : str Name of column in `table` that corresponds to the index values in `copied` and `removed`. added : pandas.Index Indexes of rows that are new in the linked table. copied : pandas.Index Indexes of rows that were copied to make new rows in linked table. removed : pandas.Index Indexes of rows that were removed from the linked table. Returns ------- updated : pandas.DataFrame
[ "Copy", "and", "update", "rows", "in", "a", "table", "that", "has", "a", "column", "referencing", "another", "table", "that", "has", "had", "rows", "added", "via", "copying", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L424-L468
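A sketch of the internal helper _update_linked_table with hypothetical household/person data; assumes urbansim is importable (the leading underscore marks it private, imported here only for illustration):

import pandas as pd
from urbansim.models.transition import _update_linked_table

persons = pd.DataFrame({'household_id': [10, 10, 11]}, index=[0, 1, 2])
# pretend household 11 was removed and household 10 was copied to new id 12
updated = _update_linked_table(
    persons, 'household_id',
    added=pd.Index([12]), copied=pd.Index([10]), removed=pd.Index([11]))
# both members of household 10 are duplicated under household_id == 12 and
# the member of household 11 is dropped.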
UDST/urbansim
urbansim/models/transition.py
GrowthRateTransition.transition
def transition(self, data, year): """ Add or remove rows to/from a table according to the prescribed growth rate for this model. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : None, optional Here for compatibility with other transition models, but ignored. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed. """ if self.accounting_column is None: nrows = int(round(len(data) * self.growth_rate)) else: nrows = int(round(data[self.accounting_column].sum() * self.growth_rate)) with log_start_finish( 'adding {} rows via growth rate ({}) transition'.format( nrows, self.growth_rate), logger): return add_or_remove_rows(data, nrows, accounting_column=self.accounting_column)
python
def transition(self, data, year): if self.accounting_column is None: nrows = int(round(len(data) * self.growth_rate)) else: nrows = int(round(data[self.accounting_column].sum() * self.growth_rate)) with log_start_finish( 'adding {} rows via growth rate ({}) transition'.format( nrows, self.growth_rate), logger): return add_or_remove_rows(data, nrows, accounting_column=self.accounting_column)
[ "def", "transition", "(", "self", ",", "data", ",", "year", ")", ":", "if", "self", ".", "accounting_column", "is", "None", ":", "nrows", "=", "int", "(", "round", "(", "len", "(", "data", ")", "*", "self", ".", "growth_rate", ")", ")", "else", ":", "nrows", "=", "int", "(", "round", "(", "data", "[", "self", ".", "accounting_column", "]", ".", "sum", "(", ")", "*", "self", ".", "growth_rate", ")", ")", "with", "log_start_finish", "(", "'adding {} rows via growth rate ({}) transition'", ".", "format", "(", "nrows", ",", "self", ".", "growth_rate", ")", ",", "logger", ")", ":", "return", "add_or_remove_rows", "(", "data", ",", "nrows", ",", "accounting_column", "=", "self", ".", "accounting_column", ")" ]
Add or remove rows to/from a table according to the prescribed growth rate for this model. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : None, optional Here for compatibility with other transition models, but ignored. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed.
[ "Add", "or", "remove", "rows", "to", "/", "from", "a", "table", "according", "to", "the", "prescribed", "growth", "rate", "for", "this", "model", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L167-L201
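A usage sketch for GrowthRateTransition.transition with hypothetical data; the constructor signature (growth rate plus an optional accounting column) is assumed from the attributes the method reads:

import pandas as pd
from urbansim.models.transition import GrowthRateTransition

grt = GrowthRateTransition(0.05)  # assumed: GrowthRateTransition(growth_rate, accounting_column=None)
df = pd.DataFrame({'x': range(100)})
updated, added, copied, removed = grt.transition(df, year=None)
# int(round(100 * 0.05)) == 5 rows are added by copying sampled rows.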
UDST/urbansim
urbansim/models/transition.py
TabularGrowthRateTransition.transition
def transition(self, data, year): """ Add or remove rows to/from a table according to the prescribed growth rate for this model and year. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : None, optional Here for compatibility with other transition models, but ignored. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed. """ logger.debug('start: tabular transition') if year not in self._config_table.index: raise ValueError('No targets for given year: {}'.format(year)) # want this to be a DataFrame year_config = self._config_table.loc[[year]] logger.debug('transitioning {} segments'.format(len(year_config))) segments = [] added_indexes = [] copied_indexes = [] removed_indexes = [] # since we're looping over discrete segments we need to track # out here where their new indexes will begin starting_index = data.index.values.max() + 1 for _, row in year_config.iterrows(): subset = util.filter_table(data, row, ignore={self._config_column}) # Do not run on segment if it is empty if len(subset) == 0: logger.debug('empty segment encountered') continue if self.accounting_column is None: nrows = self._calc_nrows(len(subset), row[self._config_column]) else: nrows = self._calc_nrows( subset[self.accounting_column].sum(), row[self._config_column]) updated, added, copied, removed = \ add_or_remove_rows(subset, nrows, starting_index, self.accounting_column) if nrows > 0: # only update the starting index if rows were added starting_index = starting_index + nrows segments.append(updated) added_indexes.append(added) copied_indexes.append(copied) removed_indexes.append(removed) updated = pd.concat(segments) added_indexes = util.concat_indexes(added_indexes) copied_indexes = util.concat_indexes(copied_indexes) removed_indexes = util.concat_indexes(removed_indexes) logger.debug('finish: tabular transition') return updated, added_indexes, copied_indexes, removed_indexes
python
def transition(self, data, year): logger.debug('start: tabular transition') if year not in self._config_table.index: raise ValueError('No targets for given year: {}'.format(year)) year_config = self._config_table.loc[[year]] logger.debug('transitioning {} segments'.format(len(year_config))) segments = [] added_indexes = [] copied_indexes = [] removed_indexes = [] starting_index = data.index.values.max() + 1 for _, row in year_config.iterrows(): subset = util.filter_table(data, row, ignore={self._config_column}) if len(subset) == 0: logger.debug('empty segment encountered') continue if self.accounting_column is None: nrows = self._calc_nrows(len(subset), row[self._config_column]) else: nrows = self._calc_nrows( subset[self.accounting_column].sum(), row[self._config_column]) updated, added, copied, removed = \ add_or_remove_rows(subset, nrows, starting_index, self.accounting_column) if nrows > 0: starting_index = starting_index + nrows segments.append(updated) added_indexes.append(added) copied_indexes.append(copied) removed_indexes.append(removed) updated = pd.concat(segments) added_indexes = util.concat_indexes(added_indexes) copied_indexes = util.concat_indexes(copied_indexes) removed_indexes = util.concat_indexes(removed_indexes) logger.debug('finish: tabular transition') return updated, added_indexes, copied_indexes, removed_indexes
[ "def", "transition", "(", "self", ",", "data", ",", "year", ")", ":", "logger", ".", "debug", "(", "'start: tabular transition'", ")", "if", "year", "not", "in", "self", ".", "_config_table", ".", "index", ":", "raise", "ValueError", "(", "'No targets for given year: {}'", ".", "format", "(", "year", ")", ")", "# want this to be a DataFrame", "year_config", "=", "self", ".", "_config_table", ".", "loc", "[", "[", "year", "]", "]", "logger", ".", "debug", "(", "'transitioning {} segments'", ".", "format", "(", "len", "(", "year_config", ")", ")", ")", "segments", "=", "[", "]", "added_indexes", "=", "[", "]", "copied_indexes", "=", "[", "]", "removed_indexes", "=", "[", "]", "# since we're looping over discrete segments we need to track", "# out here where their new indexes will begin", "starting_index", "=", "data", ".", "index", ".", "values", ".", "max", "(", ")", "+", "1", "for", "_", ",", "row", "in", "year_config", ".", "iterrows", "(", ")", ":", "subset", "=", "util", ".", "filter_table", "(", "data", ",", "row", ",", "ignore", "=", "{", "self", ".", "_config_column", "}", ")", "# Do not run on segment if it is empty", "if", "len", "(", "subset", ")", "==", "0", ":", "logger", ".", "debug", "(", "'empty segment encountered'", ")", "continue", "if", "self", ".", "accounting_column", "is", "None", ":", "nrows", "=", "self", ".", "_calc_nrows", "(", "len", "(", "subset", ")", ",", "row", "[", "self", ".", "_config_column", "]", ")", "else", ":", "nrows", "=", "self", ".", "_calc_nrows", "(", "subset", "[", "self", ".", "accounting_column", "]", ".", "sum", "(", ")", ",", "row", "[", "self", ".", "_config_column", "]", ")", "updated", ",", "added", ",", "copied", ",", "removed", "=", "add_or_remove_rows", "(", "subset", ",", "nrows", ",", "starting_index", ",", "self", ".", "accounting_column", ")", "if", "nrows", ">", "0", ":", "# only update the starting index if rows were added", "starting_index", "=", "starting_index", "+", "nrows", "segments", ".", "append", "(", "updated", ")", "added_indexes", ".", "append", "(", "added", ")", "copied_indexes", ".", "append", "(", "copied", ")", "removed_indexes", ".", "append", "(", "removed", ")", "updated", "=", "pd", ".", "concat", "(", "segments", ")", "added_indexes", "=", "util", ".", "concat_indexes", "(", "added_indexes", ")", "copied_indexes", "=", "util", ".", "concat_indexes", "(", "copied_indexes", ")", "removed_indexes", "=", "util", ".", "concat_indexes", "(", "removed_indexes", ")", "logger", ".", "debug", "(", "'finish: tabular transition'", ")", "return", "updated", ",", "added_indexes", ",", "copied_indexes", ",", "removed_indexes" ]
Add or remove rows to/from a table according to the prescribed growth rate for this model and year. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : None, optional Here for compatibility with other transition models, but ignored. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed.
[ "Add", "or", "remove", "rows", "to", "/", "from", "a", "table", "according", "to", "the", "prescribed", "growth", "rate", "for", "this", "model", "and", "year", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L261-L335
UDST/urbansim
urbansim/models/transition.py
TabularTotalsTransition.transition
def transition(self, data, year): """ Add or remove rows to/from a table according to the prescribed totals for this model and year. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : None, optional Here for compatibility with other transition models, but ignored. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed. """ with log_start_finish('tabular totals transition', logger): return super(TabularTotalsTransition, self).transition(data, year)
python
def transition(self, data, year): with log_start_finish('tabular totals transition', logger): return super(TabularTotalsTransition, self).transition(data, year)
[ "def", "transition", "(", "self", ",", "data", ",", "year", ")", ":", "with", "log_start_finish", "(", "'tabular totals transition'", ",", "logger", ")", ":", "return", "super", "(", "TabularTotalsTransition", ",", "self", ")", ".", "transition", "(", "data", ",", "year", ")" ]
Add or remove rows to/from a table according to the prescribed totals for this model and year. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : None, optional Here for compatibility with other transition models, but ignored. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Index New indexes of the rows that were added. copied : pandas.Index Indexes of rows that were copied. A row copied multiple times will have multiple entries. removed : pandas.Index Index of rows that were removed.
[ "Add", "or", "remove", "rows", "to", "/", "from", "a", "table", "according", "to", "the", "prescribed", "totals", "for", "this", "model", "and", "year", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L394-L421
UDST/urbansim
urbansim/models/transition.py
TransitionModel.transition
def transition(self, data, year, linked_tables=None): """ Add or remove rows from a table based on population targets. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : int Year number that will be passed to `transitioner`. linked_tables : dict of tuple, optional Dictionary of (table, 'column name') pairs. The column name should match the index of `data`. Indexes in `data` that are copied or removed will also be copied and removed in linked tables. The dictionary keys are used in the returned `updated_links`. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Series Indexes of new rows in `updated`. updated_links : dict of pandas.DataFrame """ logger.debug('start: transition') linked_tables = linked_tables or {} updated_links = {} with log_start_finish('add/remove rows', logger): updated, added, copied, removed = self.transitioner(data, year) for table_name, (table, col) in linked_tables.items(): logger.debug('updating linked table {}'.format(table_name)) updated_links[table_name] = \ _update_linked_table(table, col, added, copied, removed) logger.debug('finish: transition') return updated, added, updated_links
python
def transition(self, data, year, linked_tables=None): logger.debug('start: transition') linked_tables = linked_tables or {} updated_links = {} with log_start_finish('add/remove rows', logger): updated, added, copied, removed = self.transitioner(data, year) for table_name, (table, col) in linked_tables.items(): logger.debug('updating linked table {}'.format(table_name)) updated_links[table_name] = \ _update_linked_table(table, col, added, copied, removed) logger.debug('finish: transition') return updated, added, updated_links
[ "def", "transition", "(", "self", ",", "data", ",", "year", ",", "linked_tables", "=", "None", ")", ":", "logger", ".", "debug", "(", "'start: transition'", ")", "linked_tables", "=", "linked_tables", "or", "{", "}", "updated_links", "=", "{", "}", "with", "log_start_finish", "(", "'add/remove rows'", ",", "logger", ")", ":", "updated", ",", "added", ",", "copied", ",", "removed", "=", "self", ".", "transitioner", "(", "data", ",", "year", ")", "for", "table_name", ",", "(", "table", ",", "col", ")", "in", "linked_tables", ".", "items", "(", ")", ":", "logger", ".", "debug", "(", "'updating linked table {}'", ".", "format", "(", "table_name", ")", ")", "updated_links", "[", "table_name", "]", "=", "_update_linked_table", "(", "table", ",", "col", ",", "added", ",", "copied", ",", "removed", ")", "logger", ".", "debug", "(", "'finish: transition'", ")", "return", "updated", ",", "added", ",", "updated_links" ]
Add or remove rows from a table based on population targets. Parameters ---------- data : pandas.DataFrame Rows will be removed from or added to this table. year : int Year number that will be passed to `transitioner`. linked_tables : dict of tuple, optional Dictionary of (table, 'column name') pairs. The column name should match the index of `data`. Indexes in `data` that are copied or removed will also be copied and removed in linked tables. The dictionary keys are used in the returned `updated_links`. Returns ------- updated : pandas.DataFrame Table with rows removed or added. added : pandas.Series Indexes of new rows in `updated`. updated_links : dict of pandas.DataFrame
[ "Add", "or", "remove", "rows", "from", "a", "table", "based", "on", "population", "targets", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L486-L525
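A sketch tying TransitionModel.transition to a linked table (hypothetical data; assumes TransitionModel simply wraps the transitioner passed to it):

import pandas as pd
from urbansim.models.transition import GrowthRateTransition, TransitionModel

households = pd.DataFrame({'persons': [2, 3]}, index=[0, 1])
persons = pd.DataFrame({'household_id': [0, 0, 1, 1, 1]})
model = TransitionModel(GrowthRateTransition(0.5))  # assumed constructor
updated_hh, added, links = model.transition(
    households, year=2020,
    linked_tables={'persons': (persons, 'household_id')})
# links['persons'] mirrors the household changes: person rows are copied for
# copied households and dropped for removed ones.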
UDST/urbansim
urbansim/utils/yamlio.py
series_to_yaml_safe
def series_to_yaml_safe(series, ordered=False): """ Convert a pandas Series to a dict that will survive YAML serialization and re-conversion back to a Series. Parameters ---------- series : pandas.Series ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict """ index = series.index.to_native_types(quoting=True) values = series.values.tolist() if ordered: return OrderedDict( tuple((k, v)) for k, v in zip(index, values)) else: return {i: v for i, v in zip(index, values)}
python
def series_to_yaml_safe(series, ordered=False): index = series.index.to_native_types(quoting=True) values = series.values.tolist() if ordered: return OrderedDict( tuple((k, v)) for k, v in zip(index, values)) else: return {i: v for i, v in zip(index, values)}
[ "def", "series_to_yaml_safe", "(", "series", ",", "ordered", "=", "False", ")", ":", "index", "=", "series", ".", "index", ".", "to_native_types", "(", "quoting", "=", "True", ")", "values", "=", "series", ".", "values", ".", "tolist", "(", ")", "if", "ordered", ":", "return", "OrderedDict", "(", "tuple", "(", "(", "k", ",", "v", ")", ")", "for", "k", ",", "v", "in", "zip", "(", "index", ",", "values", ")", ")", "else", ":", "return", "{", "i", ":", "v", "for", "i", ",", "v", "in", "zip", "(", "index", ",", "values", ")", "}" ]
Convert a pandas Series to a dict that will survive YAML serialization and re-conversion back to a Series. Parameters ---------- series : pandas.Series ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict
[ "Convert", "a", "pandas", "Series", "to", "a", "dict", "that", "will", "survive", "YAML", "serialization", "and", "re", "-", "conversion", "back", "to", "a", "Series", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L32-L55
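A quick sketch of series_to_yaml_safe with hypothetical coefficients; assumes an era-appropriate pandas that still provides Index.to_native_types:

import pandas as pd
from urbansim.utils.yamlio import series_to_yaml_safe

coefs = pd.Series([1.5, -0.25], index=['sqft', 'dist_to_cbd'])
safe = series_to_yaml_safe(coefs)
# a plain dict: index labels converted to YAML-safe native types, values to
# plain Python scalars, so yaml.dump / yaml.load round-trips cleanly
ordered = series_to_yaml_safe(coefs, ordered=True)  # OrderedDict, index order kept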
UDST/urbansim
urbansim/utils/yamlio.py
frame_to_yaml_safe
def frame_to_yaml_safe(frame, ordered=False): """ Convert a pandas DataFrame to a dictionary that will survive YAML serialization and re-conversion back to a DataFrame. Parameters ---------- frame : pandas.DataFrame ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict """ if ordered: return OrderedDict(tuple((col, series_to_yaml_safe(series, True)) for col, series in frame.iteritems())) else: return {col: series_to_yaml_safe(series) for col, series in frame.iteritems()}
python
def frame_to_yaml_safe(frame, ordered=False): if ordered: return OrderedDict(tuple((col, series_to_yaml_safe(series, True)) for col, series in frame.iteritems())) else: return {col: series_to_yaml_safe(series) for col, series in frame.iteritems()}
[ "def", "frame_to_yaml_safe", "(", "frame", ",", "ordered", "=", "False", ")", ":", "if", "ordered", ":", "return", "OrderedDict", "(", "tuple", "(", "(", "col", ",", "series_to_yaml_safe", "(", "series", ",", "True", ")", ")", "for", "col", ",", "series", "in", "frame", ".", "iteritems", "(", ")", ")", ")", "else", ":", "return", "{", "col", ":", "series_to_yaml_safe", "(", "series", ")", "for", "col", ",", "series", "in", "frame", ".", "iteritems", "(", ")", "}" ]
Convert a pandas DataFrame to a dictionary that will survive YAML serialization and re-conversion back to a DataFrame. Parameters ---------- frame : pandas.DataFrame ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- safe : dict or OrderedDict
[ "Convert", "a", "pandas", "DataFrame", "to", "a", "dictionary", "that", "will", "survive", "YAML", "serialization", "and", "re", "-", "conversion", "back", "to", "a", "DataFrame", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L58-L79
UDST/urbansim
urbansim/utils/yamlio.py
ordered_yaml
def ordered_yaml(cfg, order=None): """ Convert a dictionary to a YAML string with preferential ordering for some keys. Converted string is meant to be fairly human readable. Parameters ---------- cfg : dict Dictionary to convert to a YAML string. order: list If provided, overrides the default key ordering. Returns ------- str Nicely formatted YAML string. """ if order is None: order = ['name', 'model_type', 'segmentation_col', 'fit_filters', 'predict_filters', 'choosers_fit_filters', 'choosers_predict_filters', 'alts_fit_filters', 'alts_predict_filters', 'interaction_predict_filters', 'choice_column', 'sample_size', 'estimation_sample_size', 'prediction_sample_size', 'model_expression', 'ytransform', 'min_segment_size', 'default_config', 'models', 'coefficients', 'fitted'] s = [] for key in order: if key not in cfg: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) for key in cfg: if key in order: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) return '\n'.join(s)
python
def ordered_yaml(cfg, order=None): if order is None: order = ['name', 'model_type', 'segmentation_col', 'fit_filters', 'predict_filters', 'choosers_fit_filters', 'choosers_predict_filters', 'alts_fit_filters', 'alts_predict_filters', 'interaction_predict_filters', 'choice_column', 'sample_size', 'estimation_sample_size', 'prediction_sample_size', 'model_expression', 'ytransform', 'min_segment_size', 'default_config', 'models', 'coefficients', 'fitted'] s = [] for key in order: if key not in cfg: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) for key in cfg: if key in order: continue s.append( yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4)) return '\n'.join(s)
[ "def", "ordered_yaml", "(", "cfg", ",", "order", "=", "None", ")", ":", "if", "order", "is", "None", ":", "order", "=", "[", "'name'", ",", "'model_type'", ",", "'segmentation_col'", ",", "'fit_filters'", ",", "'predict_filters'", ",", "'choosers_fit_filters'", ",", "'choosers_predict_filters'", ",", "'alts_fit_filters'", ",", "'alts_predict_filters'", ",", "'interaction_predict_filters'", ",", "'choice_column'", ",", "'sample_size'", ",", "'estimation_sample_size'", ",", "'prediction_sample_size'", ",", "'model_expression'", ",", "'ytransform'", ",", "'min_segment_size'", ",", "'default_config'", ",", "'models'", ",", "'coefficients'", ",", "'fitted'", "]", "s", "=", "[", "]", "for", "key", "in", "order", ":", "if", "key", "not", "in", "cfg", ":", "continue", "s", ".", "append", "(", "yaml", ".", "dump", "(", "{", "key", ":", "cfg", "[", "key", "]", "}", ",", "default_flow_style", "=", "False", ",", "indent", "=", "4", ")", ")", "for", "key", "in", "cfg", ":", "if", "key", "in", "order", ":", "continue", "s", ".", "append", "(", "yaml", ".", "dump", "(", "{", "key", ":", "cfg", "[", "key", "]", "}", ",", "default_flow_style", "=", "False", ",", "indent", "=", "4", ")", ")", "return", "'\\n'", ".", "join", "(", "s", ")" ]
Convert a dictionary to a YAML string with preferential ordering for some keys. Converted string is meant to be fairly human readable. Parameters ---------- cfg : dict Dictionary to convert to a YAML string. order: list If provided, overrides the default key ordering. Returns ------- str Nicely formatted YAML string.
[ "Convert", "a", "dictionary", "to", "a", "YAML", "string", "with", "preferential", "ordering", "for", "some", "keys", ".", "Converted", "string", "is", "meant", "to", "be", "fairly", "human", "readable", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L92-L134
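A sketch of the key ordering in ordered_yaml (hypothetical config):

from urbansim.utils.yamlio import ordered_yaml

cfg = {'fitted': True, 'name': 'rsh', 'extra_key': 1}
print(ordered_yaml(cfg))
# 'name' is emitted first and 'fitted' next, because both appear in the
# default preference list; 'extra_key' trails in the catch-all pass, each
# key dumped as its own chunk and joined with newlines.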
UDST/urbansim
urbansim/utils/yamlio.py
convert_to_yaml
def convert_to_yaml(cfg, str_or_buffer): """ Convert a dictionary to YAML and return the string or write it out depending on the type of `str_or_buffer`. Parameters ---------- cfg : dict or OrderedDict Dictionary or OrderedDict to convert. str_or_buffer : None, str, or buffer If None: the YAML string will be returned. If string: YAML will be saved to a file. If buffer: YAML will be written to buffer using the ``.write`` method. Returns ------- str or None YAML string if `str_or_buffer` is None, otherwise None since YAML is written out to a separate destination. """ order = None if isinstance(cfg, OrderedDict): order = [] s = ordered_yaml(cfg, order) if not str_or_buffer: return s elif isinstance(str_or_buffer, str): with open(str_or_buffer, 'w') as f: f.write(s) else: str_or_buffer.write(s)
python
def convert_to_yaml(cfg, str_or_buffer): order = None if isinstance(cfg, OrderedDict): order = [] s = ordered_yaml(cfg, order) if not str_or_buffer: return s elif isinstance(str_or_buffer, str): with open(str_or_buffer, 'w') as f: f.write(s) else: str_or_buffer.write(s)
[ "def", "convert_to_yaml", "(", "cfg", ",", "str_or_buffer", ")", ":", "order", "=", "None", "if", "isinstance", "(", "cfg", ",", "OrderedDict", ")", ":", "order", "=", "[", "]", "s", "=", "ordered_yaml", "(", "cfg", ",", "order", ")", "if", "not", "str_or_buffer", ":", "return", "s", "elif", "isinstance", "(", "str_or_buffer", ",", "str", ")", ":", "with", "open", "(", "str_or_buffer", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "s", ")", "else", ":", "str_or_buffer", ".", "write", "(", "s", ")" ]
Convert a dictionary to YAML and return the string or write it out depending on the type of `str_or_buffer`. Parameters ---------- cfg : dict or OrderedDict Dictionary or OrderedDict to convert. str_or_buffer : None, str, or buffer If None: the YAML string will be returned. If string: YAML will be saved to a file. If buffer: YAML will be written to buffer using the ``.write`` method. Returns ------- str or None YAML string if `str_or_buffer` is None, otherwise None since YAML is written out to a separate destination.
[ "Convert", "a", "dictionary", "to", "YAML", "and", "return", "the", "string", "or", "write", "it", "out", "depending", "on", "the", "type", "of", "str_or_buffer", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L160-L193
UDST/urbansim
urbansim/utils/yamlio.py
yaml_to_dict
def yaml_to_dict(yaml_str=None, str_or_buffer=None, ordered=False): """ Load YAML from a string, file, or buffer (an object with a .read method). Parameters are mutually exclusive. Parameters ---------- yaml_str : str, optional A string of YAML. str_or_buffer : str or file-like, optional File name or buffer from which to load YAML. ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- dict Conversion from YAML. """ if not yaml_str and not str_or_buffer: raise ValueError('One of yaml_str or str_or_buffer is required.') # determine which load method to use if ordered: loader = __ordered_load else: loader = yaml.load if yaml_str: d = loader(yaml_str) elif isinstance(str_or_buffer, str): with open(str_or_buffer) as f: d = loader(f) else: d = loader(str_or_buffer) return d
python
def yaml_to_dict(yaml_str=None, str_or_buffer=None, ordered=False): if not yaml_str and not str_or_buffer: raise ValueError('One of yaml_str or str_or_buffer is required.') if ordered: loader = __ordered_load else: loader = yaml.load if yaml_str: d = loader(yaml_str) elif isinstance(str_or_buffer, str): with open(str_or_buffer) as f: d = loader(f) else: d = loader(str_or_buffer) return d
[ "def", "yaml_to_dict", "(", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ",", "ordered", "=", "False", ")", ":", "if", "not", "yaml_str", "and", "not", "str_or_buffer", ":", "raise", "ValueError", "(", "'One of yaml_str or str_or_buffer is required.'", ")", "# determine which load method to use", "if", "ordered", ":", "loader", "=", "__ordered_load", "else", ":", "loader", "=", "yaml", ".", "load", "if", "yaml_str", ":", "d", "=", "loader", "(", "yaml_str", ")", "elif", "isinstance", "(", "str_or_buffer", ",", "str", ")", ":", "with", "open", "(", "str_or_buffer", ")", "as", "f", ":", "d", "=", "loader", "(", "f", ")", "else", ":", "d", "=", "loader", "(", "str_or_buffer", ")", "return", "d" ]
Load YAML from a string, file, or buffer (an object with a .read method). Parameters are mutually exclusive. Parameters ---------- yaml_str : str, optional A string of YAML. str_or_buffer : str or file-like, optional File name or buffer from which to load YAML. ordered: bool, optional, default False If True, an OrderedDict is returned. Returns ------- dict Conversion from YAML.
[ "Load", "YAML", "from", "a", "string", "file", "or", "buffer", "(", "an", "object", "with", "a", ".", "read", "method", ")", ".", "Parameters", "are", "mutually", "exclusive", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L196-L233
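A sketch of the input modes of yaml_to_dict (the file name is hypothetical):

from urbansim.utils.yamlio import yaml_to_dict

d = yaml_to_dict(yaml_str='name: rsh\nfitted: false')
d['fitted']  # False
# or, equivalently, from a path or an open buffer:
# d = yaml_to_dict(str_or_buffer='model_config.yaml')  # hypothetical file
# pass ordered=True to get an OrderedDict back.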
UDST/urbansim
urbansim/accounts.py
Account.add_transaction
def add_transaction(self, amount, subaccount=None, metadata=None): """ Add a new transaction to the account. Parameters ---------- amount : float Negative for withdrawals, positive for deposits. subaccount : object, optional Any indicator of a subaccount to which this transaction applies. metadata : dict, optional Any extra metadata to record with the transaction. (E.g. Info about where the money is coming from or going.) May not contain keys 'amount' or 'subaccount'. """ metadata = metadata or {} self.transactions.append(Transaction(amount, subaccount, metadata)) self.balance += amount
python
def add_transaction(self, amount, subaccount=None, metadata=None): metadata = metadata or {} self.transactions.append(Transaction(amount, subaccount, metadata)) self.balance += amount
[ "def", "add_transaction", "(", "self", ",", "amount", ",", "subaccount", "=", "None", ",", "metadata", "=", "None", ")", ":", "metadata", "=", "metadata", "or", "{", "}", "self", ".", "transactions", ".", "append", "(", "Transaction", "(", "amount", ",", "subaccount", ",", "metadata", ")", ")", "self", ".", "balance", "+=", "amount" ]
Add a new transaction to the account. Parameters ---------- amount : float Negative for withdrawls, positive for deposits. subaccount : object, optional Any indicator of a subaccount to which this transaction applies. metadata : dict, optional Any extra metadata to record with the transaction. (E.g. Info about where the money is coming from or going.) May not contain keys 'amount' or 'subaccount'.
[ "Add", "a", "new", "transaction", "to", "the", "account", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L57-L75
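A running sketch of Account.add_transaction with a hypothetical account; the Account constructor signature (a name plus a starting balance) is assumed:

from urbansim.accounts import Account

acct = Account('infrastructure_fee', balance=1000)  # assumed signature
acct.add_transaction(-250, subaccount='roads',
                     metadata={'description': 'repaving'})
acct.balance                                 # 750
acct.total_transactions_by_subacct('roads')  # -250, summed per subaccount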
UDST/urbansim
urbansim/accounts.py
Account.total_transactions_by_subacct
def total_transactions_by_subacct(self, subaccount): """ Get the sum of all transactions for a given subaccount. Parameters ---------- subaccount : object Identifier of subaccount. Returns ------- total : float """ return sum( t.amount for t in self.transactions if t.subaccount == subaccount)
python
def total_transactions_by_subacct(self, subaccount): return sum( t.amount for t in self.transactions if t.subaccount == subaccount)
[ "def", "total_transactions_by_subacct", "(", "self", ",", "subaccount", ")", ":", "return", "sum", "(", "t", ".", "amount", "for", "t", "in", "self", ".", "transactions", "if", "t", ".", "subaccount", "==", "subaccount", ")" ]
Get the sum of all transactions for a given subaccount. Parameters ---------- subaccount : object Identifier of subaccount. Returns ------- total : float
[ "Get", "the", "sum", "of", "all", "transactions", "for", "a", "given", "subaccount", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L102-L117
UDST/urbansim
urbansim/accounts.py
Account.to_frame
def to_frame(self): """ Return transactions as a pandas DataFrame. """ col_names = _column_names_from_metadata( t.metadata for t in self.transactions) def trow(t): return tz.concatv( (t.amount, t.subaccount), (t.metadata.get(c) for c in col_names)) rows = [trow(t) for t in self.transactions] if len(rows) == 0: return pd.DataFrame(columns=COLS + col_names) return pd.DataFrame(rows, columns=COLS + col_names)
python
def to_frame(self): col_names = _column_names_from_metadata( t.metadata for t in self.transactions) def trow(t): return tz.concatv( (t.amount, t.subaccount), (t.metadata.get(c) for c in col_names)) rows = [trow(t) for t in self.transactions] if len(rows) == 0: return pd.DataFrame(columns=COLS + col_names) return pd.DataFrame(rows, columns=COLS + col_names)
[ "def", "to_frame", "(", "self", ")", ":", "col_names", "=", "_column_names_from_metadata", "(", "t", ".", "metadata", "for", "t", "in", "self", ".", "transactions", ")", "def", "trow", "(", "t", ")", ":", "return", "tz", ".", "concatv", "(", "(", "t", ".", "amount", ",", "t", ".", "subaccount", ")", ",", "(", "t", ".", "metadata", ".", "get", "(", "c", ")", "for", "c", "in", "col_names", ")", ")", "rows", "=", "[", "trow", "(", "t", ")", "for", "t", "in", "self", ".", "transactions", "]", "if", "len", "(", "rows", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", "columns", "=", "COLS", "+", "col_names", ")", "return", "pd", ".", "DataFrame", "(", "rows", ",", "columns", "=", "COLS", "+", "col_names", ")" ]
Return transactions as a pandas DataFrame.
[ "Return", "transactions", "as", "a", "pandas", "DataFrame", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L136-L153
UDST/urbansim
urbansim/models/util.py
apply_filter_query
def apply_filter_query(df, filters=None): """ Use the DataFrame.query method to filter a table down to the desired rows. Parameters ---------- df : pandas.DataFrame filters : list of str or str, optional List of filters to apply. Will be joined together with ' and ' and passed to DataFrame.query. A string will be passed straight to DataFrame.query. If not supplied no filtering will be done. Returns ------- filtered_df : pandas.DataFrame """ with log_start_finish('apply filter query: {!r}'.format(filters), logger): if filters: if isinstance(filters, str): query = filters else: query = ' and '.join(filters) return df.query(query) else: return df
python
def apply_filter_query(df, filters=None): with log_start_finish('apply filter query: {!r}'.format(filters), logger): if filters: if isinstance(filters, str): query = filters else: query = ' and '.join(filters) return df.query(query) else: return df
[ "def", "apply_filter_query", "(", "df", ",", "filters", "=", "None", ")", ":", "with", "log_start_finish", "(", "'apply filter query: {!r}'", ".", "format", "(", "filters", ")", ",", "logger", ")", ":", "if", "filters", ":", "if", "isinstance", "(", "filters", ",", "str", ")", ":", "query", "=", "filters", "else", ":", "query", "=", "' and '", ".", "join", "(", "filters", ")", "return", "df", ".", "query", "(", "query", ")", "else", ":", "return", "df" ]
Use the DataFrame.query method to filter a table down to the desired rows. Parameters ---------- df : pandas.DataFrame filters : list of str or str, optional List of filters to apply. Will be joined together with ' and ' and passed to DataFrame.query. A string will be passed straight to DataFrame.query. If not supplied no filtering will be done. Returns ------- filtered_df : pandas.DataFrame
[ "Use", "the", "DataFrame", ".", "query", "method", "to", "filter", "a", "table", "down", "to", "the", "desired", "rows", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L24-L51
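A sketch of apply_filter_query (hypothetical table):

import pandas as pd
from urbansim.models.util import apply_filter_query

df = pd.DataFrame({'sqft': [500, 1500, 2500], 'year_built': [1990, 2005, 2012]})
apply_filter_query(df, ['sqft > 1000', 'year_built >= 2000'])
# equivalent to df.query('sqft > 1000 and year_built >= 2000'); a single
# string is passed through unchanged and None applies no filtering.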
UDST/urbansim
urbansim/models/util.py
_filterize
def _filterize(name, value): """ Turn a `name` and `value` into a string expression compatible with the ``DataFrame.query`` method. Parameters ---------- name : str Should be the name of a column in the table to which the filter will be applied. A suffix of '_max' will result in a "less than" filter, a suffix of '_min' will result in a "greater than or equal to" filter, and no recognized suffix will result in an "equal to" filter. value : any Value side of filter for comparison to column values. Returns ------- filter_exp : str """ if name.endswith('_min'): name = name[:-4] comp = '>=' elif name.endswith('_max'): name = name[:-4] comp = '<' else: comp = '==' result = '{} {} {!r}'.format(name, comp, value) logger.debug( 'converted name={} and value={} to filter {}'.format( name, value, result)) return result
python
def _filterize(name, value): if name.endswith('_min'): name = name[:-4] comp = '>=' elif name.endswith('_max'): name = name[:-4] comp = '<' else: comp = '==' result = '{} {} {!r}'.format(name, comp, value) logger.debug( 'converted name={} and value={} to filter {}'.format( name, value, result)) return result
[ "def", "_filterize", "(", "name", ",", "value", ")", ":", "if", "name", ".", "endswith", "(", "'_min'", ")", ":", "name", "=", "name", "[", ":", "-", "4", "]", "comp", "=", "'>='", "elif", "name", ".", "endswith", "(", "'_max'", ")", ":", "name", "=", "name", "[", ":", "-", "4", "]", "comp", "=", "'<'", "else", ":", "comp", "=", "'=='", "result", "=", "'{} {} {!r}'", ".", "format", "(", "name", ",", "comp", ",", "value", ")", "logger", ".", "debug", "(", "'converted name={} and value={} to filter {}'", ".", "format", "(", "name", ",", "value", ",", "result", ")", ")", "return", "result" ]
Turn a `name` and `value` into a string expression compatible with the ``DataFrame.query`` method. Parameters ---------- name : str Should be the name of a column in the table to which the filter will be applied. A suffix of '_max' will result in a "less than" filter, a suffix of '_min' will result in a "greater than or equal to" filter, and no recognized suffix will result in an "equal to" filter. value : any Value side of filter for comparison to column values. Returns ------- filter_exp : str
[ "Turn", "a", "name", "and", "value", "into", "a", "string", "expression", "compatible", "the", "DataFrame", ".", "query", "method", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L54-L89
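The suffix conventions of _filterize, spelled out (a private helper, imported here only for illustration):

from urbansim.models.util import _filterize

_filterize('sqft_min', 1000)  # "sqft >= 1000"
_filterize('sqft_max', 3000)  # "sqft < 3000"  (strictly less than)
_filterize('zone_id', 12)     # "zone_id == 12"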
UDST/urbansim
urbansim/models/util.py
filter_table
def filter_table(table, filter_series, ignore=None): """ Filter a table based on a set of restrictions given in Series of column name / filter parameter pairs. The column names can have suffixes `_min` and `_max` to indicate "less than" and "greater than" constraints. Parameters ---------- table : pandas.DataFrame Table to filter. filter_series : pandas.Series Series of column name / value pairs of filter constraints. Columns that end with '_max' will be used to create "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. ignore : sequence of str, optional List of column names that should not be used for filtering. Returns ------- filtered : pandas.DataFrame """ with log_start_finish('filter table', logger): ignore = ignore if ignore else set() filters = [_filterize(name, val) for name, val in filter_series.iteritems() if not (name in ignore or (isinstance(val, numbers.Number) and np.isnan(val)))] return apply_filter_query(table, filters)
python
def filter_table(table, filter_series, ignore=None): with log_start_finish('filter table', logger): ignore = ignore if ignore else set() filters = [_filterize(name, val) for name, val in filter_series.iteritems() if not (name in ignore or (isinstance(val, numbers.Number) and np.isnan(val)))] return apply_filter_query(table, filters)
[ "def", "filter_table", "(", "table", ",", "filter_series", ",", "ignore", "=", "None", ")", ":", "with", "log_start_finish", "(", "'filter table'", ",", "logger", ")", ":", "ignore", "=", "ignore", "if", "ignore", "else", "set", "(", ")", "filters", "=", "[", "_filterize", "(", "name", ",", "val", ")", "for", "name", ",", "val", "in", "filter_series", ".", "iteritems", "(", ")", "if", "not", "(", "name", "in", "ignore", "or", "(", "isinstance", "(", "val", ",", "numbers", ".", "Number", ")", "and", "np", ".", "isnan", "(", "val", ")", ")", ")", "]", "return", "apply_filter_query", "(", "table", ",", "filters", ")" ]
Filter a table based on a set of restrictions given in Series of column name / filter parameter pairs. The column names can have suffixes `_min` and `_max` to indicate "less than" and "greater than" constraints. Parameters ---------- table : pandas.DataFrame Table to filter. filter_series : pandas.Series Series of column name / value pairs of filter constraints. Columns that end with '_max' will be used to create "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. ignore : sequence of str, optional List of column names that should not be used for filtering. Returns ------- filtered : pandas.DataFrame
[ "Filter", "a", "table", "based", "on", "a", "set", "of", "restrictions", "given", "in", "Series", "of", "column", "name", "/", "filter", "parameter", "pairs", ".", "The", "column", "names", "can", "have", "suffixes", "_min", "and", "_max", "to", "indicate", "less", "than", "and", "greater", "than", "constraints", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L92-L126
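A minimal sketch of filter_table in use; the DataFrame and constraint names below are invented for illustration:

import pandas as pd
from urbansim.models import util

df = pd.DataFrame({'price': [100, 250, 400], 'beds': [1, 2, 3]})
# '_max' becomes a "less than" filter, '_min' a "greater than or equal to" filter
constraints = pd.Series({'price_max': 300, 'beds_min': 2})
filtered = util.filter_table(df, constraints)  # keeps only the price=250, beds=2 row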
UDST/urbansim
urbansim/models/util.py
has_constant_expr
def has_constant_expr(expr): """ Report whether a model expression has a constant specific term. That is, a term explicitly specifying whether the model should or should not include a constant. (e.g. '+ 1' or '- 1'.) Parameters ---------- expr : str Model expression to check. Returns ------- has_constant : bool """ def has_constant(node): if node.type == 'ONE': return True for n in node.args: if has_constant(n): return True return False return has_constant(patsy.parse_formula.parse_formula(expr))
python
def has_constant_expr(expr): def has_constant(node): if node.type == 'ONE': return True for n in node.args: if has_constant(n): return True return False return has_constant(patsy.parse_formula.parse_formula(expr))
[ "def", "has_constant_expr", "(", "expr", ")", ":", "def", "has_constant", "(", "node", ")", ":", "if", "node", ".", "type", "==", "'ONE'", ":", "return", "True", "for", "n", "in", "node", ".", "args", ":", "if", "has_constant", "(", "n", ")", ":", "return", "True", "return", "False", "return", "has_constant", "(", "patsy", ".", "parse_formula", ".", "parse_formula", "(", "expr", ")", ")" ]
Report whether a model expression has a constant specific term. That is, a term explicitly specifying whether the model should or should not include a constant. (e.g. '+ 1' or '- 1'.) Parameters ---------- expr : str Model expression to check. Returns ------- has_constant : bool
[ "Report", "whether", "a", "model", "expression", "has", "a", "constant", "specific", "term", ".", "That", "is", "a", "term", "explicitly", "specifying", "whether", "the", "model", "should", "or", "should", "not", "include", "a", "constant", ".", "(", "e", ".", "g", ".", "+", "1", "or", "-", "1", ".", ")" ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L145-L171
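A quick sketch of has_constant_expr on explicit constants; the expected results follow from the 'ONE' node check above:

from urbansim.models import util

util.has_constant_expr('x + y + 1')  # True: constant explicitly included
util.has_constant_expr('x + y - 1')  # True: explicitly removing it also counts
util.has_constant_expr('x + y')      # False: no explicit '+ 1' or '- 1' term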
UDST/urbansim
urbansim/models/util.py
str_model_expression
def str_model_expression(expr, add_constant=True): """ We support specifying model expressions as strings, lists, or dicts; but for use with patsy and statsmodels we need a string. This function will take any of those as input and return a string. Parameters ---------- expr : str, iterable, or dict A string will be returned unmodified except to add or remove a constant. An iterable sequence will be joined together with ' + '. A dictionary should have ``right_side`` and, optionally, ``left_side`` keys. The ``right_side`` can be a list or a string and will be handled as above. If ``left_side`` is present it will be joined with ``right_side`` with ' ~ '. add_constant : bool, optional Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model. If the expression already has a '+ 1' or '- 1' this option will be ignored. Returns ------- model_expression : str A string model expression suitable for use with statsmodels and patsy. """ if not isinstance(expr, str): if isinstance(expr, collections.Mapping): left_side = expr.get('left_side') right_side = str_model_expression(expr['right_side'], add_constant) else: # some kind of iterable like a list left_side = None right_side = ' + '.join(expr) if left_side: model_expression = ' ~ '.join((left_side, right_side)) else: model_expression = right_side else: model_expression = expr if not has_constant_expr(model_expression): if add_constant: model_expression += ' + 1' else: model_expression += ' - 1' logger.debug( 'converted expression: {!r} to model: {!r}'.format( expr, model_expression)) return model_expression
python
def str_model_expression(expr, add_constant=True): if not isinstance(expr, str): if isinstance(expr, collections.Mapping): left_side = expr.get('left_side') right_side = str_model_expression(expr['right_side'], add_constant) else: left_side = None right_side = ' + '.join(expr) if left_side: model_expression = ' ~ '.join((left_side, right_side)) else: model_expression = right_side else: model_expression = expr if not has_constant_expr(model_expression): if add_constant: model_expression += ' + 1' else: model_expression += ' - 1' logger.debug( 'converted expression: {!r} to model: {!r}'.format( expr, model_expression)) return model_expression
[ "def", "str_model_expression", "(", "expr", ",", "add_constant", "=", "True", ")", ":", "if", "not", "isinstance", "(", "expr", ",", "str", ")", ":", "if", "isinstance", "(", "expr", ",", "collections", ".", "Mapping", ")", ":", "left_side", "=", "expr", ".", "get", "(", "'left_side'", ")", "right_side", "=", "str_model_expression", "(", "expr", "[", "'right_side'", "]", ",", "add_constant", ")", "else", ":", "# some kind of iterable like a list", "left_side", "=", "None", "right_side", "=", "' + '", ".", "join", "(", "expr", ")", "if", "left_side", ":", "model_expression", "=", "' ~ '", ".", "join", "(", "(", "left_side", ",", "right_side", ")", ")", "else", ":", "model_expression", "=", "right_side", "else", ":", "model_expression", "=", "expr", "if", "not", "has_constant_expr", "(", "model_expression", ")", ":", "if", "add_constant", ":", "model_expression", "+=", "' + 1'", "else", ":", "model_expression", "+=", "' - 1'", "logger", ".", "debug", "(", "'converted expression: {!r} to model: {!r}'", ".", "format", "(", "expr", ",", "model_expression", ")", ")", "return", "model_expression" ]
We support specifying model expressions as strings, lists, or dicts; but for use with patsy and statsmodels we need a string. This function will take any of those as input and return a string. Parameters ---------- expr : str, iterable, or dict A string will be returned unmodified except to add or remove a constant. An iterable sequence will be joined together with ' + '. A dictionary should have ``right_side`` and, optionally, ``left_side`` keys. The ``right_side`` can be a list or a string and will be handled as above. If ``left_side`` is present it will be joined with ``right_side`` with ' ~ '. add_constant : bool, optional Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model. If the expression already has a '+ 1' or '- 1' this option will be ignored. Returns ------- model_expression : str A string model expression suitable for use with statsmodels and patsy.
[ "We", "support", "specifying", "model", "expressions", "as", "strings", "lists", "or", "dicts", ";", "but", "for", "use", "with", "patsy", "and", "statsmodels", "we", "need", "a", "string", ".", "This", "function", "will", "take", "any", "of", "those", "as", "input", "and", "return", "a", "string", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L174-L227
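A usage sketch covering the three accepted input shapes (the column names are illustrative):

from urbansim.models import util

util.str_model_expression(['sqft', 'beds'])
# -> 'sqft + beds + 1'
util.str_model_expression({'left_side': 'price', 'right_side': ['sqft', 'beds']})
# -> 'price ~ sqft + beds + 1'
util.str_model_expression('price ~ sqft - 1')
# -> 'price ~ sqft - 1' (an explicit constant term is left alone)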
UDST/urbansim
urbansim/models/util.py
sorted_groupby
def sorted_groupby(df, groupby): """ Perform a groupby on a DataFrame using a specific column and assuming that that column is sorted. Parameters ---------- df : pandas.DataFrame groupby : object Column name on which to groupby. This column must be sorted. Returns ------- generator Yields pairs of group_name, DataFrame. """ start = 0 prev = df[groupby].iloc[start] for i, x in enumerate(df[groupby]): if x != prev: yield prev, df.iloc[start:i] prev = x start = i # need to send back the last group yield prev, df.iloc[start:]
python
def sorted_groupby(df, groupby): start = 0 prev = df[groupby].iloc[start] for i, x in enumerate(df[groupby]): if x != prev: yield prev, df.iloc[start:i] prev = x start = i yield prev, df.iloc[start:]
[ "def", "sorted_groupby", "(", "df", ",", "groupby", ")", ":", "start", "=", "0", "prev", "=", "df", "[", "groupby", "]", ".", "iloc", "[", "start", "]", "for", "i", ",", "x", "in", "enumerate", "(", "df", "[", "groupby", "]", ")", ":", "if", "x", "!=", "prev", ":", "yield", "prev", ",", "df", ".", "iloc", "[", "start", ":", "i", "]", "prev", "=", "x", "start", "=", "i", "# need to send back the last group", "yield", "prev", ",", "df", ".", "iloc", "[", "start", ":", "]" ]
Perform a groupby on a DataFrame using a specific column and assuming that that column is sorted. Parameters ---------- df : pandas.DataFrame groupby : object Column name on which to groupby. This column must be sorted. Returns ------- generator Yields pairs of group_name, DataFrame.
[ "Perform", "a", "groupby", "on", "a", "DataFrame", "using", "a", "specific", "column", "and", "assuming", "that", "that", "column", "is", "sorted", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L230-L255
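Because sorted_groupby makes a single pass and slices by position, it only works when the groupby column is already sorted; a minimal sketch:

import pandas as pd
from urbansim.models import util

df = pd.DataFrame({'zone': [1, 1, 2, 2, 2], 'value': range(5)})
for zone, group in util.sorted_groupby(df, 'zone'):
    print(zone, len(group))
# prints: 1 2, then 2 3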
UDST/urbansim
urbansim/models/util.py
columns_in_filters
def columns_in_filters(filters): """ Returns a list of the columns used in a set of query filters. Parameters ---------- filters : list of str or str List of the filters as passed to ``apply_filter_query``. Returns ------- columns : list of str List of all the strings mentioned in the filters. """ if not filters: return [] if not isinstance(filters, str): filters = ' '.join(filters) columns = [] reserved = {'and', 'or', 'in', 'not'} for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline): if toknum == NAME and tokval not in reserved: columns.append(tokval) return list(tz.unique(columns))
python
def columns_in_filters(filters): if not filters: return [] if not isinstance(filters, str): filters = ' '.join(filters) columns = [] reserved = {'and', 'or', 'in', 'not'} for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline): if toknum == NAME and tokval not in reserved: columns.append(tokval) return list(tz.unique(columns))
[ "def", "columns_in_filters", "(", "filters", ")", ":", "if", "not", "filters", ":", "return", "[", "]", "if", "not", "isinstance", "(", "filters", ",", "str", ")", ":", "filters", "=", "' '", ".", "join", "(", "filters", ")", "columns", "=", "[", "]", "reserved", "=", "{", "'and'", ",", "'or'", ",", "'in'", ",", "'not'", "}", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "generate_tokens", "(", "StringIO", "(", "filters", ")", ".", "readline", ")", ":", "if", "toknum", "==", "NAME", "and", "tokval", "not", "in", "reserved", ":", "columns", ".", "append", "(", "tokval", ")", "return", "list", "(", "tz", ".", "unique", "(", "columns", ")", ")" ]
Returns a list of the columns used in a set of query filters. Parameters ---------- filters : list of str or str List of the filters as passed to ``apply_filter_query``. Returns ------- columns : list of str List of all the strings mentioned in the filters.
[ "Returns", "a", "list", "of", "the", "columns", "used", "in", "a", "set", "of", "query", "filters", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L258-L286
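An illustrative call; string literals and numbers are not NAME tokens, and the reserved words ('and', 'or', 'in', 'not') are skipped:

from urbansim.models import util

util.columns_in_filters(["price < 300", "tenure in ('rent', 'own')", "beds == 2"])
# -> ['price', 'tenure', 'beds']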
UDST/urbansim
urbansim/models/util.py
_tokens_from_patsy
def _tokens_from_patsy(node): """ Yields all the individual tokens from within a patsy formula as parsed by patsy.parse_formula.parse_formula. Parameters ---------- node : patsy.parse_formula.ParseNode """ for n in node.args: for t in _tokens_from_patsy(n): yield t if node.token: yield node.token
python
def _tokens_from_patsy(node): for n in node.args: for t in _tokens_from_patsy(n): yield t if node.token: yield node.token
[ "def", "_tokens_from_patsy", "(", "node", ")", ":", "for", "n", "in", "node", ".", "args", ":", "for", "t", "in", "_tokens_from_patsy", "(", "n", ")", ":", "yield", "t", "if", "node", ".", "token", ":", "yield", "node", ".", "token" ]
Yields all the individual tokens from within a patsy formula as parsed by patsy.parse_formula.parse_formula. Parameters ---------- node : patsy.parse_formula.ParseNode
[ "Yields", "all", "the", "individual", "tokens", "from", "within", "a", "patsy", "formula", "as", "parsed", "by", "patsy", ".", "parse_formula", ".", "parse_formula", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L289-L304
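This private generator is normally driven only by columns_in_formula below, but it can be exercised directly; a minimal sketch:

import patsy
from urbansim.models import util

tree = patsy.parse_formula.parse_formula('y ~ x + z')
tokens = list(util._tokens_from_patsy(tree))  # every token in the parse tree, children first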
UDST/urbansim
urbansim/models/util.py
columns_in_formula
def columns_in_formula(formula): """ Returns the names of all the columns used in a patsy formula. Parameters ---------- formula : str, iterable, or dict Any formula construction supported by ``str_model_expression``. Returns ------- columns : list of str """ if formula is None: return [] formula = str_model_expression(formula, add_constant=False) columns = [] tokens = map( lambda x: x.extra, tz.remove( lambda x: x.extra is None, _tokens_from_patsy(patsy.parse_formula.parse_formula(formula)))) for tok in tokens: # if there are parentheses in the expression we # want to drop them and everything outside # and start again from the top if '(' in tok: start = tok.find('(') + 1 fin = tok.rfind(')') columns.extend(columns_in_formula(tok[start:fin])) else: for toknum, tokval, _, _, _ in generate_tokens( StringIO(tok).readline): if toknum == NAME: columns.append(tokval) return list(tz.unique(columns))
python
def columns_in_formula(formula): if formula is None: return [] formula = str_model_expression(formula, add_constant=False) columns = [] tokens = map( lambda x: x.extra, tz.remove( lambda x: x.extra is None, _tokens_from_patsy(patsy.parse_formula.parse_formula(formula)))) for tok in tokens: if '(' in tok: start = tok.find('(') + 1 fin = tok.rfind(')') columns.extend(columns_in_formula(tok[start:fin])) else: for toknum, tokval, _, _, _ in generate_tokens( StringIO(tok).readline): if toknum == NAME: columns.append(tokval) return list(tz.unique(columns))
[ "def", "columns_in_formula", "(", "formula", ")", ":", "if", "formula", "is", "None", ":", "return", "[", "]", "formula", "=", "str_model_expression", "(", "formula", ",", "add_constant", "=", "False", ")", "columns", "=", "[", "]", "tokens", "=", "map", "(", "lambda", "x", ":", "x", ".", "extra", ",", "tz", ".", "remove", "(", "lambda", "x", ":", "x", ".", "extra", "is", "None", ",", "_tokens_from_patsy", "(", "patsy", ".", "parse_formula", ".", "parse_formula", "(", "formula", ")", ")", ")", ")", "for", "tok", "in", "tokens", ":", "# if there are parentheses in the expression we", "# want to drop them and everything outside", "# and start again from the top", "if", "'('", "in", "tok", ":", "start", "=", "tok", ".", "find", "(", "'('", ")", "+", "1", "fin", "=", "tok", ".", "rfind", "(", "')'", ")", "columns", ".", "extend", "(", "columns_in_formula", "(", "tok", "[", "start", ":", "fin", "]", ")", ")", "else", ":", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "generate_tokens", "(", "StringIO", "(", "tok", ")", ".", "readline", ")", ":", "if", "toknum", "==", "NAME", ":", "columns", ".", "append", "(", "tokval", ")", "return", "list", "(", "tz", ".", "unique", "(", "columns", ")", ")" ]
Returns the names of all the columns used in a patsy formula. Parameters ---------- formula : str, iterable, or dict Any formula construction supported by ``str_model_expression``. Returns ------- columns : list of str
[ "Returns", "the", "names", "of", "all", "the", "columns", "used", "in", "a", "patsy", "formula", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L307-L347
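A usage sketch; the recursion into parentheses means names used only to call functions (np, log1p, I) are dropped while their arguments are kept. Column names are invented:

from urbansim.models import util

util.columns_in_formula('np.log1p(price) ~ I(sqft / 1000.) + beds')
# -> something like ['price', 'sqft', 'beds']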
UDST/urbansim
urbansim/models/regression.py
fit_model
def fit_model(df, filters, model_expression): """ Use statsmodels OLS to construct a model relation. Parameters ---------- df : pandas.DataFrame Data to use for fit. Should contain all the columns referenced in the `model_expression`. filters : list of str Any filters to apply before doing the model fit. model_expression : str A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. Returns ------- fit : statsmodels.regression.linear_model.OLSResults """ df = util.apply_filter_query(df, filters) model = smf.ols(formula=model_expression, data=df) if len(model.exog) != len(df): raise ModelEvaluationError( 'Estimated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') with log_start_finish('statsmodels OLS fit', logger): return model.fit()
python
def fit_model(df, filters, model_expression): df = util.apply_filter_query(df, filters) model = smf.ols(formula=model_expression, data=df) if len(model.exog) != len(df): raise ModelEvaluationError( 'Estimated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') with log_start_finish('statsmodels OLS fit', logger): return model.fit()
[ "def", "fit_model", "(", "df", ",", "filters", ",", "model_expression", ")", ":", "df", "=", "util", ".", "apply_filter_query", "(", "df", ",", "filters", ")", "model", "=", "smf", ".", "ols", "(", "formula", "=", "model_expression", ",", "data", "=", "df", ")", "if", "len", "(", "model", ".", "exog", ")", "!=", "len", "(", "df", ")", ":", "raise", "ModelEvaluationError", "(", "'Estimated data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "with", "log_start_finish", "(", "'statsmodels OLS fit'", ",", "logger", ")", ":", "return", "model", ".", "fit", "(", ")" ]
Use statsmodels OLS to construct a model relation. Parameters ---------- df : pandas.DataFrame Data to use for fit. Should contain all the columns referenced in the `model_expression`. filters : list of str Any filters to apply before doing the model fit. model_expression : str A patsy model expression that can be used with statsmodels. Should contain both the left- and right-hand sides. Returns ------- fit : statsmodels.regression.linear_model.OLSResults
[ "Use", "statsmodels", "OLS", "to", "construct", "a", "model", "relation", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L25-L55
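A minimal estimation sketch; the data and formula are invented for illustration:

import pandas as pd
from urbansim.models import regression

homes = pd.DataFrame({'price': [200., 250., 300., 450.],
                      'sqft': [900., 1100., 1300., 2000.]})
fit = regression.fit_model(homes, ['sqft > 0'], 'price ~ sqft')
print(fit.params)  # Intercept and sqft coefficient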
UDST/urbansim
urbansim/models/regression.py
predict
def predict(df, filters, model_fit, ytransform=None): """ Apply model to new data to predict new dependent values. Parameters ---------- df : pandas.DataFrame filters : list of str Any filters to apply before doing prediction. model_fit : statsmodels.regression.linear_model.OLSResults Result of model estimation. ytransform : callable, optional A function to call on the array of predicted output. For example, if the model relation is predicting the log of price, you might pass ``ytransform=np.exp`` so that the results reflect actual price. By default no transformation is applied. Returns ------- result : pandas.Series Predicted values as a pandas Series. Will have the index of `df` after applying filters. """ df = util.apply_filter_query(df, filters) with log_start_finish('statsmodels predict', logger): sim_data = model_fit.predict(df) if len(sim_data) != len(df): raise ModelEvaluationError( 'Predicted data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') if ytransform: sim_data = ytransform(sim_data) return pd.Series(sim_data, index=df.index)
python
def predict(df, filters, model_fit, ytransform=None): df = util.apply_filter_query(df, filters) with log_start_finish('statsmodels predict', logger): sim_data = model_fit.predict(df) if len(sim_data) != len(df): raise ModelEvaluationError( 'Predicted data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.') if ytransform: sim_data = ytransform(sim_data) return pd.Series(sim_data, index=df.index)
[ "def", "predict", "(", "df", ",", "filters", ",", "model_fit", ",", "ytransform", "=", "None", ")", ":", "df", "=", "util", ".", "apply_filter_query", "(", "df", ",", "filters", ")", "with", "log_start_finish", "(", "'statsmodels predict'", ",", "logger", ")", ":", "sim_data", "=", "model_fit", ".", "predict", "(", "df", ")", "if", "len", "(", "sim_data", ")", "!=", "len", "(", "df", ")", ":", "raise", "ModelEvaluationError", "(", "'Predicted data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "if", "ytransform", ":", "sim_data", "=", "ytransform", "(", "sim_data", ")", "return", "pd", ".", "Series", "(", "sim_data", ",", "index", "=", "df", ".", "index", ")" ]
Apply model to new data to predict new dependent values. Parameters ---------- df : pandas.DataFrame filters : list of str Any filters to apply before doing prediction. model_fit : statsmodels.regression.linear_model.OLSResults Result of model estimation. ytransform : callable, optional A function to call on the array of predicted output. For example, if the model relation is predicting the log of price, you might pass ``ytransform=np.exp`` so that the results reflect actual price. By default no transformation is applied. Returns ------- result : pandas.Series Predicted values as a pandas Series. Will have the index of `df` after applying filters.
[ "Apply", "model", "to", "new", "data", "to", "predict", "new", "dependent", "values", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L58-L97
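Continuing the sketch above, predicting for new rows; passing None for filters is assumed to leave the table unfiltered:

new_homes = pd.DataFrame({'sqft': [1000., 1500.]})
predicted = regression.predict(new_homes, None, fit)  # pandas Series indexed like new_homes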
UDST/urbansim
urbansim/models/regression.py
_model_fit_to_table
def _model_fit_to_table(fit): """ Produce a pandas DataFrame of model fit results from a statsmodels fit result object. Parameters ---------- fit : statsmodels.regression.linear_model.RegressionResults Returns ------- fit_parameters : pandas.DataFrame Will have columns 'Coefficient', 'Std. Error', and 'T-Score'. Index will be model terms. This frame will also have non-standard attributes .rsquared and .rsquared_adj with the same meaning and value as on `fit`. """ fit_parameters = pd.DataFrame( {'Coefficient': fit.params, 'Std. Error': fit.bse, 'T-Score': fit.tvalues}) fit_parameters.rsquared = fit.rsquared fit_parameters.rsquared_adj = fit.rsquared_adj return fit_parameters
python
def _model_fit_to_table(fit): fit_parameters = pd.DataFrame( {'Coefficient': fit.params, 'Std. Error': fit.bse, 'T-Score': fit.tvalues}) fit_parameters.rsquared = fit.rsquared fit_parameters.rsquared_adj = fit.rsquared_adj return fit_parameters
[ "def", "_model_fit_to_table", "(", "fit", ")", ":", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "{", "'Coefficient'", ":", "fit", ".", "params", ",", "'Std. Error'", ":", "fit", ".", "bse", ",", "'T-Score'", ":", "fit", ".", "tvalues", "}", ")", "fit_parameters", ".", "rsquared", "=", "fit", ".", "rsquared", "fit_parameters", ".", "rsquared_adj", "=", "fit", ".", "rsquared_adj", "return", "fit_parameters" ]
Produce a pandas DataFrame of model fit results from a statsmodels fit result object. Parameters ---------- fit : statsmodels.regression.linear_model.RegressionResults Returns ------- fit_parameters : pandas.DataFrame Will have columns 'Coefficient', 'Std. Error', and 'T-Score'. Index will be model terms. This frame will also have non-standard attributes .rsquared and .rsquared_adj with the same meaning and value as on `fit`.
[ "Produce", "a", "pandas", "DataFrame", "of", "model", "fit", "results", "from", "a", "statsmodels", "fit", "result", "object", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L178-L204
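The rsquared attributes are plain Python attributes tacked onto the DataFrame, so they do not survive copies or most pandas operations; a quick sketch using the fit from above:

params = regression._model_fit_to_table(fit)  # private helper
params.loc['sqft', 'Coefficient']
params.rsquared, params.rsquared_adj  # copied from the statsmodels fit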
UDST/urbansim
urbansim/models/regression.py
_FakeRegressionResults.predict
def predict(self, data): """ Predict new values by running data through the fit model. Parameters ---------- data : pandas.DataFrame Table with columns corresponding to the RHS of `model_expression`. Returns ------- predicted : ndarray Array of predicted values. """ with log_start_finish('_FakeRegressionResults prediction', logger): model_design = dmatrix( self._rhs, data=data, return_type='dataframe') return model_design.dot(self.params).values
python
def predict(self, data): with log_start_finish('_FakeRegressionResults prediction', logger): model_design = dmatrix( self._rhs, data=data, return_type='dataframe') return model_design.dot(self.params).values
[ "def", "predict", "(", "self", ",", "data", ")", ":", "with", "log_start_finish", "(", "'_FakeRegressionResults prediction'", ",", "logger", ")", ":", "model_design", "=", "dmatrix", "(", "self", ".", "_rhs", ",", "data", "=", "data", ",", "return_type", "=", "'dataframe'", ")", "return", "model_design", ".", "dot", "(", "self", ".", "params", ")", ".", "values" ]
Predict new values by running data through the fit model. Parameters ---------- data : pandas.DataFrame Table with columns corresponding to the RHS of `model_expression`. Returns ------- predicted : ndarray Array of predicted values.
[ "Predict", "new", "values", "by", "running", "data", "through", "the", "fit", "model", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L157-L175
UDST/urbansim
urbansim/models/regression.py
RegressionModel.from_yaml
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a RegressionModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- RegressionModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) model = cls( cfg['fit_filters'], cfg['predict_filters'], cfg['model_expression'], YTRANSFORM_MAPPING[cfg['ytransform']], cfg['name']) if 'fitted' in cfg and cfg['fitted']: fit_parameters = pd.DataFrame(cfg['fit_parameters']) fit_parameters.rsquared = cfg['fit_rsquared'] fit_parameters.rsquared_adj = cfg['fit_rsquared_adj'] model.model_fit = _FakeRegressionResults( model.str_model_expression, fit_parameters, cfg['fit_rsquared'], cfg['fit_rsquared_adj']) model.fit_parameters = fit_parameters logger.debug('loaded regression model {} from YAML'.format(model.name)) return model
python
def from_yaml(cls, yaml_str=None, str_or_buffer=None): cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) model = cls( cfg['fit_filters'], cfg['predict_filters'], cfg['model_expression'], YTRANSFORM_MAPPING[cfg['ytransform']], cfg['name']) if 'fitted' in cfg and cfg['fitted']: fit_parameters = pd.DataFrame(cfg['fit_parameters']) fit_parameters.rsquared = cfg['fit_rsquared'] fit_parameters.rsquared_adj = cfg['fit_rsquared_adj'] model.model_fit = _FakeRegressionResults( model.str_model_expression, fit_parameters, cfg['fit_rsquared'], cfg['fit_rsquared_adj']) model.fit_parameters = fit_parameters logger.debug('loaded regression model {} from YAML'.format(model.name)) return model
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ")", ":", "cfg", "=", "yamlio", ".", "yaml_to_dict", "(", "yaml_str", ",", "str_or_buffer", ")", "model", "=", "cls", "(", "cfg", "[", "'fit_filters'", "]", ",", "cfg", "[", "'predict_filters'", "]", ",", "cfg", "[", "'model_expression'", "]", ",", "YTRANSFORM_MAPPING", "[", "cfg", "[", "'ytransform'", "]", "]", ",", "cfg", "[", "'name'", "]", ")", "if", "'fitted'", "in", "cfg", "and", "cfg", "[", "'fitted'", "]", ":", "fit_parameters", "=", "pd", ".", "DataFrame", "(", "cfg", "[", "'fit_parameters'", "]", ")", "fit_parameters", ".", "rsquared", "=", "cfg", "[", "'fit_rsquared'", "]", "fit_parameters", ".", "rsquared_adj", "=", "cfg", "[", "'fit_rsquared_adj'", "]", "model", ".", "model_fit", "=", "_FakeRegressionResults", "(", "model", ".", "str_model_expression", ",", "fit_parameters", ",", "cfg", "[", "'fit_rsquared'", "]", ",", "cfg", "[", "'fit_rsquared_adj'", "]", ")", "model", ".", "fit_parameters", "=", "fit_parameters", "logger", ".", "debug", "(", "'loaded regression model {} from YAML'", ".", "format", "(", "model", ".", "name", ")", ")", "return", "model" ]
Create a RegressionModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- RegressionModel
[ "Create", "a", "RegressionModel", "instance", "from", "a", "saved", "YAML", "configuration", ".", "Arguments", "are", "mutually", "exclusive", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L260-L298
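A reload sketch; the YAML file name is hypothetical:

from urbansim.models.regression import RegressionModel

model = RegressionModel.from_yaml(str_or_buffer='price_model.yaml')
model.fitted  # True if the saved configuration included fit parameters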
UDST/urbansim
urbansim/models/regression.py
RegressionModel.fit
def fit(self, data, debug=False): """ Fit the model to data and store/return the results. Parameters ---------- data : pandas.DataFrame Data to use for fitting the model. Must contain all the columns referenced by the `model_expression`. debug : bool If debug is set to true, this sets the attribute "est_data" to a dataframe with the actual data used for estimation of this model. Returns ------- fit : statsmodels.regression.linear_model.OLSResults This is returned for inspection, but also stored on the class instance for use during prediction. """ with log_start_finish('fitting model {}'.format(self.name), logger): fit = fit_model(data, self.fit_filters, self.str_model_expression) self.model_fit = fit self.fit_parameters = _model_fit_to_table(fit) if debug: index = util.apply_filter_query(data, self.fit_filters).index assert len(fit.model.exog) == len(index), ( "The estimate data is unequal in length to the original " "dataframe, usually caused by nans") df = pd.DataFrame( fit.model.exog, columns=fit.model.exog_names, index=index) df[fit.model.endog_names] = fit.model.endog df["fittedvalues"] = fit.fittedvalues df["residuals"] = fit.resid self.est_data = df return fit
python
def fit(self, data, debug=False): with log_start_finish('fitting model {}'.format(self.name), logger): fit = fit_model(data, self.fit_filters, self.str_model_expression) self.model_fit = fit self.fit_parameters = _model_fit_to_table(fit) if debug: index = util.apply_filter_query(data, self.fit_filters).index assert len(fit.model.exog) == len(index), ( "The estimate data is unequal in length to the original " "dataframe, usually caused by nans") df = pd.DataFrame( fit.model.exog, columns=fit.model.exog_names, index=index) df[fit.model.endog_names] = fit.model.endog df["fittedvalues"] = fit.fittedvalues df["residuals"] = fit.resid self.est_data = df return fit
[ "def", "fit", "(", "self", ",", "data", ",", "debug", "=", "False", ")", ":", "with", "log_start_finish", "(", "'fitting model {}'", ".", "format", "(", "self", ".", "name", ")", ",", "logger", ")", ":", "fit", "=", "fit_model", "(", "data", ",", "self", ".", "fit_filters", ",", "self", ".", "str_model_expression", ")", "self", ".", "model_fit", "=", "fit", "self", ".", "fit_parameters", "=", "_model_fit_to_table", "(", "fit", ")", "if", "debug", ":", "index", "=", "util", ".", "apply_filter_query", "(", "data", ",", "self", ".", "fit_filters", ")", ".", "index", "assert", "len", "(", "fit", ".", "model", ".", "exog", ")", "==", "len", "(", "index", ")", ",", "(", "\"The estimate data is unequal in length to the original \"", "\"dataframe, usually caused by nans\"", ")", "df", "=", "pd", ".", "DataFrame", "(", "fit", ".", "model", ".", "exog", ",", "columns", "=", "fit", ".", "model", ".", "exog_names", ",", "index", "=", "index", ")", "df", "[", "fit", ".", "model", ".", "endog_names", "]", "=", "fit", ".", "model", ".", "endog", "df", "[", "\"fittedvalues\"", "]", "=", "fit", ".", "fittedvalues", "df", "[", "\"residuals\"", "]", "=", "fit", ".", "resid", "self", ".", "est_data", "=", "df", "return", "fit" ]
Fit the model to data and store/return the results. Parameters ---------- data : pandas.DataFrame Data to use for fitting the model. Must contain all the columns referenced by the `model_expression`. debug : bool If debug is set to true, this sets the attribute "est_data" to a dataframe with the actual data used for estimation of this model. Returns ------- fit : statsmodels.regression.linear_model.OLSResults This is returned for inspection, but also stored on the class instance for use during prediction.
[ "Fit", "the", "model", "to", "data", "and", "store", "/", "return", "the", "results", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L309-L346
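An end-to-end construction-and-fit sketch; the argument order is assumed to mirror the from_yaml call above, and the data is the invented homes frame from earlier:

model = RegressionModel(['sqft > 0'], None, 'price ~ sqft', name='price_model')
results = model.fit(homes, debug=True)
model.est_data.head()  # estimation-time design matrix, kept because debug=True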
UDST/urbansim
urbansim/models/regression.py
RegressionModel.report_fit
def report_fit(self): """ Print a report of the fit results. """ if not self.fitted: print('Model not yet fit.') return print('R-Squared: {0:.3f}'.format(self.model_fit.rsquared)) print('Adj. R-Squared: {0:.3f}'.format(self.model_fit.rsquared_adj)) print('') tbl = PrettyTable() tbl.add_column('Component', self.fit_parameters.index.values) for col in ('Coefficient', 'Std. Error', 'T-Score'): tbl.add_column(col, self.fit_parameters[col].values) tbl.align['Component'] = 'l' tbl.float_format = '.3' print(tbl)
python
def report_fit(self): if not self.fitted: print('Model not yet fit.') return print('R-Squared: {0:.3f}'.format(self.model_fit.rsquared)) print('Adj. R-Squared: {0:.3f}'.format(self.model_fit.rsquared_adj)) print('') tbl = PrettyTable() tbl.add_column('Component', self.fit_parameters.index.values) for col in ('Coefficient', 'Std. Error', 'T-Score'): tbl.add_column(col, self.fit_parameters[col].values) tbl.align['Component'] = 'l' tbl.float_format = '.3' print(tbl)
[ "def", "report_fit", "(", "self", ")", ":", "if", "not", "self", ".", "fitted", ":", "print", "(", "'Model not yet fit.'", ")", "return", "print", "(", "'R-Squared: {0:.3f}'", ".", "format", "(", "self", ".", "model_fit", ".", "rsquared", ")", ")", "print", "(", "'Adj. R-Squared: {0:.3f}'", ".", "format", "(", "self", ".", "model_fit", ".", "rsquared_adj", ")", ")", "print", "(", "''", ")", "tbl", "=", "PrettyTable", "(", ")", "tbl", ".", "add_column", "(", "'Component'", ",", "self", ".", "fit_parameters", ".", "index", ".", "values", ")", "for", "col", "in", "(", "'Coefficient'", ",", "'Std. Error'", ",", "'T-Score'", ")", ":", "tbl", ".", "add_column", "(", "col", ",", "self", ".", "fit_parameters", "[", "col", "]", ".", "values", ")", "tbl", ".", "align", "[", "'Component'", "]", "=", "'l'", "tbl", ".", "float_format", "=", "'.3'", "print", "(", "tbl", ")" ]
Print a report of the fit results.
[ "Print", "a", "report", "of", "the", "fit", "results", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L364-L388
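Calling report_fit on a fitted model prints the two R-squared lines followed by a PrettyTable of the parameters; the numbers below are purely illustrative:

model.report_fit()
# R-Squared: 0.954
# Adj. R-Squared: 0.931
#
# +-----------+-------------+------------+---------+
# | Component | Coefficient | Std. Error | T-Score |
# +-----------+-------------+------------+---------+
# ...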
UDST/urbansim
urbansim/models/regression.py
RegressionModel.predict
def predict(self, data): """ Predict a new data set based on an estimated model. Parameters ---------- data : pandas.DataFrame Data to use for prediction. Must contain all the columns referenced by the right-hand side of the `model_expression`. Returns ------- result : pandas.Series Predicted values as a pandas Series. Will have the index of `data` after applying filters. """ self.assert_fitted() with log_start_finish('predicting model {}'.format(self.name), logger): return predict( data, self.predict_filters, self.model_fit, self.ytransform)
python
def predict(self, data): self.assert_fitted() with log_start_finish('predicting model {}'.format(self.name), logger): return predict( data, self.predict_filters, self.model_fit, self.ytransform)
[ "def", "predict", "(", "self", ",", "data", ")", ":", "self", ".", "assert_fitted", "(", ")", "with", "log_start_finish", "(", "'predicting model {}'", ".", "format", "(", "self", ".", "name", ")", ",", "logger", ")", ":", "return", "predict", "(", "data", ",", "self", ".", "predict_filters", ",", "self", ".", "model_fit", ",", "self", ".", "ytransform", ")" ]
Predict a new data set based on an estimated model. Parameters ---------- data : pandas.DataFrame Data to use for prediction. Must contain all the columns referenced by the right-hand side of the `model_expression`. Returns ------- result : pandas.Series Predicted values as a pandas Series. Will have the index of `data` after applying filters.
[ "Predict", "a", "new", "data", "set", "based", "on", "an", "estimated", "model", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L390-L410
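Prediction goes through the stored fit (real, or reconstituted from YAML) and applies predict_filters and ytransform automatically; assert_fitted guards against calling this before fit() or from_yaml():

predictions = model.predict(new_homes)  # pandas Series indexed like the filtered input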
UDST/urbansim
urbansim/models/regression.py
RegressionModel.to_dict
def to_dict(self): """ Returns a dictionary representation of a RegressionModel instance. """ d = { 'model_type': 'regression', 'name': self.name, 'fit_filters': self.fit_filters, 'predict_filters': self.predict_filters, 'model_expression': self.model_expression, 'ytransform': YTRANSFORM_MAPPING[self.ytransform], 'fitted': self.fitted, 'fit_parameters': None, 'fit_rsquared': None, 'fit_rsquared_adj': None } if self.fitted: d['fit_parameters'] = yamlio.frame_to_yaml_safe( self.fit_parameters) d['fit_rsquared'] = float(self.model_fit.rsquared) d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj) return d
python
def to_dict(self): d = { 'model_type': 'regression', 'name': self.name, 'fit_filters': self.fit_filters, 'predict_filters': self.predict_filters, 'model_expression': self.model_expression, 'ytransform': YTRANSFORM_MAPPING[self.ytransform], 'fitted': self.fitted, 'fit_parameters': None, 'fit_rsquared': None, 'fit_rsquared_adj': None } if self.fitted: d['fit_parameters'] = yamlio.frame_to_yaml_safe( self.fit_parameters) d['fit_rsquared'] = float(self.model_fit.rsquared) d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj) return d
[ "def", "to_dict", "(", "self", ")", ":", "d", "=", "{", "'model_type'", ":", "'regression'", ",", "'name'", ":", "self", ".", "name", ",", "'fit_filters'", ":", "self", ".", "fit_filters", ",", "'predict_filters'", ":", "self", ".", "predict_filters", ",", "'model_expression'", ":", "self", ".", "model_expression", ",", "'ytransform'", ":", "YTRANSFORM_MAPPING", "[", "self", ".", "ytransform", "]", ",", "'fitted'", ":", "self", ".", "fitted", ",", "'fit_parameters'", ":", "None", ",", "'fit_rsquared'", ":", "None", ",", "'fit_rsquared_adj'", ":", "None", "}", "if", "self", ".", "fitted", ":", "d", "[", "'fit_parameters'", "]", "=", "yamlio", ".", "frame_to_yaml_safe", "(", "self", ".", "fit_parameters", ")", "d", "[", "'fit_rsquared'", "]", "=", "float", "(", "self", ".", "model_fit", ".", "rsquared", ")", "d", "[", "'fit_rsquared_adj'", "]", "=", "float", "(", "self", ".", "model_fit", ".", "rsquared_adj", ")", "return", "d" ]
Returns a dictionary representation of a RegressionModel instance.
[ "Returns", "a", "dictionary", "representation", "of", "a", "RegressionModel", "instance", "." ]
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L412-L436
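A serialization sketch; note the rsquared values are cast to plain floats so the dict round-trips cleanly through YAML:

d = model.to_dict()
d['model_type']    # 'regression'
d['fitted']        # True once fit() has run
d['fit_rsquared']  # a plain float, not a numpy scalar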