Groups statistics of identical tasks by summing them. Also adds an 'instances_count' statistic reporting how many instances are running on the server. Args: result: result of a Mesos query. A list of dictionaries with 'executor_id' and 'framework_id' as strings and 'statistics' as a dictionary of labeled numbers.
def _group_and_publish_tasks_statistics(self, result):
    for i in result:
        executor_id = i['executor_id']
        i['executor_id'] = executor_id[:executor_id.rfind('.')]
        i['statistics']['instances_count'] = 1
    r = {}
    for i in result:
        executor_id = i['executor_id']
        r[executor_id] = r.get(executor_id, {})
        r[executor_id]['framework_id'] = i['framework_id']
        r[executor_id]['statistics'] = r[executor_id].get('statistics', {})
        r[executor_id]['statistics'] = self._sum_statistics(
            i['statistics'], r[executor_id]['statistics'])
    self._add_cpu_usage(r)
    self._add_cpu_percent(r)
    self._add_mem_percent(r)
    self._publish(r)
188,402
After using ``export_vtkjs()`` to create a ``.vtkjs`` file from a data scene and uploading it to an online file-hosting service like Dropbox, use this method to get a shareable link to that scene on the `PVGeo VTKjs viewer`_. .. _PVGeo VTKjs viewer: http://viewer.vtki.org **Currently supported file hosts:** - Dropbox Args: host (str): the name of the file-hosting service. inURL (str): the web URL to the ``.vtkjs`` file.
def get_vtkjs_url(*args):
    if len(args) == 1:
        host = 'dropbox'
        inURL = args[0]
    elif len(args) == 2:
        host = args[0]
        inURL = args[1]
    else:
        raise RuntimeError('Arguments not understood.')
    if host.lower() == "dropbox":
        convertURL = convert_dropbox_url(inURL)
    else:
        print("--> Warning: Web host not specified or supported. "
              "URL is simply appended to standalone scene loader link.")
        convertURL = inURL
    return generate_viewer_url(convertURL)
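A minimal usage sketch; the Dropbox share link below is hypothetical:

# Hypothetical share link to a previously uploaded .vtkjs scene file.
url = get_vtkjs_url('dropbox', 'https://www.dropbox.com/s/abc123/scene.vtkjs?dl=0')
print(url)  # a PVGeo viewer link wrapping the converted file URL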
189,518
Selects columns for which the function returns True. Args: fun: a function that will be applied to each column.
def select_if(df, fun):
    def _filter_f(col):
        try:
            return fun(df[col])
        except Exception:
            return False
    cols = list(filter(_filter_f, df.columns))
    return df[cols]
190,468
Drops columns for which the function returns True. Args: fun: a function that will be applied to each column.
def drop_if(df, fun):
    def _filter_f(col):
        try:
            return fun(df[col])
        except Exception:
            return False
    cols = list(filter(_filter_f, df.columns))
    return df.drop(cols, axis=1)
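A small sketch of both helpers on a toy DataFrame; they are called directly here, though in a dfply-style pipeline they would normally be applied through the >> operator:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
select_if(df, lambda col: np.issubdtype(col.dtype, np.number))  # keeps only 'a'
drop_if(df, lambda col: np.issubdtype(col.dtype, np.number))    # keeps only 'b'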
190,469
Returns the mean of a series. Args: series (pandas.Series): column to summarize.
def mean(series):
    if np.issubdtype(series.dtype, np.number):
        return series.mean()
    else:
        return np.nan
190,479
Returns the first value of a series. Args: series (pandas.Series): column to summarize. Kwargs: order_by: a pandas.Series or list of series (can be symbolic) to order the input series by before summarization.
def first(series, order_by=None):
    if order_by is not None:
        series = order_series_by(series, order_by)
    first_s = series.iloc[0]
    return first_s
190,480
Returns the last value of a series. Args: series (pandas.Series): column to summarize. Kwargs: order_by: a pandas.Series or list of series (can be symbolic) to order the input series by before summarization.
def last(series, order_by=None):
    if order_by is not None:
        series = order_series_by(series, order_by)
    last_s = series.iloc[series.size - 1]
    return last_s
190,481
Returns the nth value of a series. Args: series (pandas.Series): column to summarize. n (integer): position of desired value. Returns `NaN` if out of range. Kwargs: order_by: a pandas.Series or list of series (can be symbolic) to order the input series by before summarization.
def nth(series, n, order_by=None):
    if order_by is not None:
        series = order_series_by(series, order_by)
    try:
        return series.iloc[n]
    except Exception:
        return np.nan
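A quick sketch of the three summarizers on an unordered series:

import pandas as pd

s = pd.Series([30, 10, 20])
first(s)   # 30
last(s)    # 20
nth(s, 1)  # 10
nth(s, 5)  # nan, since position 5 is out of range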
190,482
Returns the median value of a series. Args: series (pandas.Series): column to summarize.
def median(series):
    if np.issubdtype(series.dtype, np.number):
        return series.median()
    else:
        return np.nan
190,483
Returns the variance of values in a series. Args: series (pandas.Series): column to summarize.
def var(series):
    if np.issubdtype(series.dtype, np.number):
        return series.var()
    else:
        return np.nan
190,484
Returns the standard deviation of values in a series. Args: series (pandas.Series): column to summarize.
def sd(series):
    if np.issubdtype(series.dtype, np.number):
        return series.std()
    else:
        return np.nan
190,485
Renames columns, where keyword argument values are the current names of columns and keys are the new names. Args: df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe. Kwargs: **kwargs: key:value pairs where keys are new names for columns and values are current names of columns.
def rename(df, **kwargs):
    return df.rename(columns={v: k for k, v in kwargs.items()})
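A short sketch; note that the keyword is the new name and the value is the existing column:

import pandas as pd

df = pd.DataFrame({'a': [1], 'b': [2]})
rename(df, alpha='a')  # column 'a' is renamed to 'alpha'; 'b' is untouched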
190,523
Returns rows that appear in either DataFrame. Args: df (pandas.DataFrame): data passed in through the pipe. other (pandas.DataFrame): other DataFrame to use for set operation with the first. Kwargs: index (bool): Boolean indicating whether to consider the pandas index as part of the set operation (default `False`). keep (str): Indicates which duplicate should be kept. Options are `'first'` and `'last'`.
def union(df, other, index=False, keep='first'):
    validate_set_ops(df, other)
    stacked = df.append(other)
    if index:
        stacked_reset_indexes = stacked.reset_index()
        index_cols = [col for col in stacked_reset_indexes.columns
                      if col not in df.columns]
        index_name = df.index.names
        return_df = stacked_reset_indexes.drop_duplicates(
            keep=keep).set_index(index_cols)
        return_df.index.names = index_name
        return return_df
    else:
        return stacked.drop_duplicates(keep=keep)
190,530
Returns rows that appear in both DataFrames. Args: df (pandas.DataFrame): data passed in through the pipe. other (pandas.DataFrame): other DataFrame to use for set operation with the first. Kwargs: index (bool): Boolean indicating whether to consider the pandas index as part of the set operation (default `False`). keep (str): Indicates which duplicate should be kept. Options are `'first'` and `'last'`.
def intersect(df, other, index=False, keep='first'):
    validate_set_ops(df, other)
    if index:
        df_reset_index = df.reset_index()
        other_reset_index = other.reset_index()
        index_cols = [col for col in df_reset_index.columns
                      if col not in df.columns]
        df_index_names = df.index.names
        return_df = (pd.merge(df_reset_index, other_reset_index,
                              how='inner',
                              left_on=df_reset_index.columns.values.tolist(),
                              right_on=df_reset_index.columns.values.tolist())
                     .set_index(index_cols))
        return_df.index.names = df_index_names
        return_df = return_df.drop_duplicates(keep=keep)
        return return_df
    else:
        return_df = pd.merge(df, other, how='inner',
                             left_on=df.columns.values.tolist(),
                             right_on=df.columns.values.tolist())
        return_df = return_df.drop_duplicates(keep=keep)
        return return_df
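A sketch of both set operations on two overlapping frames with matching columns:

import pandas as pd

df1 = pd.DataFrame({'x': [1, 2]})
df2 = pd.DataFrame({'x': [2, 3]})
union(df1, df2)      # rows with x = 1, 2, 3
intersect(df1, df2)  # the row with x = 2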
190,531
If values in a series match a specified value, change them to `np.nan`. Args: series: Series or vector, often symbolic. *values: Value(s) to convert to `np.nan` in the series.
def na_if(series, *values):
    series = pd.Series(series)
    series[series.isin(values)] = np.nan
    return series
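For example:

import pandas as pd

s = pd.Series([1, -99, 3])
na_if(s, -99)  # 1.0, NaN, 3.0; the sentinel value -99 becomes np.nan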
190,538
Equivalent to `series.rank(method='dense', ascending=ascending)`. Args: series: column to rank. Kwargs: ascending (bool): whether to rank in ascending order (default is `True`).
def dense_rank(series, ascending=True):
    ranks = series.rank(method='dense', ascending=ascending)
    return ranks
190,540
Equivalent to `series.rank(method='min', ascending=ascending)`. Args: series: column to rank. Kwargs: ascending (bool): whether to rank in ascending order (default is `True`).
def min_rank(series, ascending=True):
    ranks = series.rank(method='min', ascending=ascending)
    return ranks
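The two ranking methods differ only after ties:

import pandas as pd

s = pd.Series([10, 20, 20, 30])
min_rank(s)    # 1.0, 2.0, 2.0, 4.0 (rank 3 is skipped after the tie)
dense_rank(s)  # 1.0, 2.0, 2.0, 3.0 (no gap after the tie)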
190,541
Calculates cumulative any of values. Equivalent to `series.expanding().apply(np.any).astype(bool)`. Args: series: column to compute cumulative any for.
def cumany(series):
    anys = series.expanding().apply(np.any).astype(bool)
    return anys
190,542
Calculates cumulative all of values. Equivalent to `series.expanding().apply(np.all).astype(bool)`. Args: series: column to compute cumulative all for.
def cumall(series):
    alls = series.expanding().apply(np.all).astype(bool)
    return alls
190,543
Search a protein sequence against an HMMER sequence database. Arguments: seq -- the sequence to search, as a FASTA string. seqdb -- sequence database to search against. range -- a string range of results to return (e.g. '1,10' for the first ten). output -- the output format (defaults to JSON).
def phmmer(**kwargs):
    logging.debug(kwargs)
    args = {'seq': kwargs.get('seq'),
            'seqdb': kwargs.get('seqdb')}
    args2 = {'output': kwargs.get('output', 'json'),
             'range': kwargs.get('range')}
    return _hmmer("http://hmmer.janelia.org/search/phmmer", args, args2)
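A hedged usage sketch; the FASTA string and database name below are illustrative only:

result = phmmer(seq='>query\nMKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ',
                seqdb='pdb', range='1,10')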
191,956
Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: dialogflow_v2.SessionEntityTypesClient: The constructed client.
def from_service_account_file(cls, filename, *args, **kwargs):
    credentials = service_account.Credentials.from_service_account_file(
        filename)
    kwargs['credentials'] = credentials
    return cls(*args, **kwargs)
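A minimal usage sketch; the key path is hypothetical:

import dialogflow_v2

client = dialogflow_v2.SessionEntityTypesClient.from_service_account_file(
    '/path/to/service-account-key.json')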
194,662
Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.
def list_knowledge_bases(project_id):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.KnowledgeBasesClient()
    project_path = client.project_path(project_id)

    print('Knowledge Bases for: {}'.format(project_id))
    for knowledge_base in client.list_knowledge_bases(project_path):
        print(' - Display Name: {}'.format(knowledge_base.display_name))
        print(' - Knowledge ID: {}\n'.format(knowledge_base.name))
194,669
Creates a Knowledge base. Args: project_id: The GCP project linked with the agent. display_name: The display name of the Knowledge base.
def create_knowledge_base(project_id, display_name):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.KnowledgeBasesClient()
    project_path = client.project_path(project_id)
    knowledge_base = dialogflow.types.KnowledgeBase(
        display_name=display_name)

    response = client.create_knowledge_base(project_path, knowledge_base)

    print('Knowledge Base created:\n')
    print('Display Name: {}\n'.format(response.display_name))
    print('Knowledge ID: {}\n'.format(response.name))
194,670
Gets a specific Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
def get_knowledge_base(project_id, knowledge_base_id):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.KnowledgeBasesClient()
    knowledge_base_path = client.knowledge_base_path(
        project_id, knowledge_base_id)

    response = client.get_knowledge_base(knowledge_base_path)

    print('Got Knowledge Base:')
    print(' - Display Name: {}'.format(response.display_name))
    print(' - Knowledge ID: {}'.format(response.name))
194,671
Deletes a specific Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
def delete_knowledge_base(project_id, knowledge_base_id):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.KnowledgeBasesClient()
    knowledge_base_path = client.knowledge_base_path(
        project_id, knowledge_base_id)

    client.delete_knowledge_base(knowledge_base_path)

    print('Knowledge Base deleted.')
194,672
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.ListContexts = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Contexts/ListContexts', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.ListContextsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.ListContextsResponse.FromString, ) self.GetContext = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Contexts/GetContext', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.GetContextRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.Context.FromString, ) self.CreateContext = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Contexts/CreateContext', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.CreateContextRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.Context.FromString, ) self.UpdateContext = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Contexts/UpdateContext', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.UpdateContextRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.Context.FromString, ) self.DeleteContext = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Contexts/DeleteContext', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.DeleteContextRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.DeleteAllContexts = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Contexts/DeleteAllContexts', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.DeleteAllContextsRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
194,681
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.ListSessionEntityTypes = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/ListSessionEntityTypes', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.ListSessionEntityTypesRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.ListSessionEntityTypesResponse.FromString, ) self.GetSessionEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/GetSessionEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.GetSessionEntityTypeRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString, ) self.CreateSessionEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/CreateSessionEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.CreateSessionEntityTypeRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString, ) self.UpdateSessionEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/UpdateSessionEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.UpdateSessionEntityTypeRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString, ) self.DeleteSessionEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/DeleteSessionEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.DeleteSessionEntityTypeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
194,688
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.DetectIntent = channel.unary_unary( '/google.cloud.dialogflow.v2.Sessions/DetectIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.DetectIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.DetectIntentResponse.FromString, ) self.StreamingDetectIntent = channel.stream_stream( '/google.cloud.dialogflow.v2.Sessions/StreamingDetectIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.StreamingDetectIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.StreamingDetectIntentResponse.FromString, )
194,702
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.ListKnowledgeBases = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.KnowledgeBases/ListKnowledgeBases', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.ListKnowledgeBasesRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.ListKnowledgeBasesResponse.FromString, ) self.GetKnowledgeBase = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.KnowledgeBases/GetKnowledgeBase', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.GetKnowledgeBaseRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.KnowledgeBase.FromString, ) self.CreateKnowledgeBase = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.KnowledgeBases/CreateKnowledgeBase', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.CreateKnowledgeBaseRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.KnowledgeBase.FromString, ) self.DeleteKnowledgeBase = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.KnowledgeBases/DeleteKnowledgeBase', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.DeleteKnowledgeBaseRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
194,719
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.ListIntents = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/ListIntents', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsResponse.FromString, ) self.GetIntent = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/GetIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.GetIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString, ) self.CreateIntent = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/CreateIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.CreateIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString, ) self.UpdateIntent = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/UpdateIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.UpdateIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString, ) self.DeleteIntent = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/DeleteIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.DeleteIntentRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.BatchUpdateIntents = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/BatchUpdateIntents', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchUpdateIntentsRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.BatchDeleteIntents = channel.unary_unary( '/google.cloud.dialogflow.v2.Intents/BatchDeleteIntents', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchDeleteIntentsRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, )
194,720
Returns the result of detect intent with querying Knowledge Connector. Args: project_id: The GCP project linked with the agent you are going to query. session_id: Id of the session, using the same `session_id` between requests allows continuation of the conversation. language_code: Language of the queries. knowledge_base_id: The Knowledge base's id to query against. texts: A list of text queries to send.
def detect_intent_knowledge(project_id, session_id, language_code,
                            knowledge_base_id, texts):
    import dialogflow_v2beta1 as dialogflow
    session_client = dialogflow.SessionsClient()
    session_path = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session_path))

    for text in texts:
        text_input = dialogflow.types.TextInput(
            text=text, language_code=language_code)
        query_input = dialogflow.types.QueryInput(text=text_input)
        knowledge_base_path = (
            dialogflow.knowledge_bases_client.KnowledgeBasesClient
            .knowledge_base_path(project_id, knowledge_base_id))
        query_params = dialogflow.types.QueryParameters(
            knowledge_base_names=[knowledge_base_path])

        response = session_client.detect_intent(
            session=session_path, query_input=query_input,
            query_params=query_params)

        print('=' * 20)
        print('Query text: {}'.format(response.query_result.query_text))
        print('Detected intent: {} (confidence: {})\n'.format(
            response.query_result.intent.display_name,
            response.query_result.intent_detection_confidence))
        print('Fulfillment text: {}\n'.format(
            response.query_result.fulfillment_text))
        print('Knowledge results:')
        knowledge_answers = response.query_result.knowledge_answers
        for answers in knowledge_answers.answers:
            print(' - Answer: {}'.format(answers.answer))
            print(' - Confidence: {}'.format(
                answers.match_confidence))
194,721
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.ListEntityTypes = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/ListEntityTypes', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesResponse.FromString, ) self.GetEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/GetEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.GetEntityTypeRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString, ) self.CreateEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/CreateEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.CreateEntityTypeRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString, ) self.UpdateEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/UpdateEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.UpdateEntityTypeRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString, ) self.DeleteEntityType = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/DeleteEntityType', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.DeleteEntityTypeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.BatchUpdateEntityTypes = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntityTypes', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntityTypesRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.BatchDeleteEntityTypes = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntityTypes', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntityTypesRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.BatchCreateEntities = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchCreateEntities', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchCreateEntitiesRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.BatchUpdateEntities = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntities', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntitiesRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.BatchDeleteEntities = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntities', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntitiesRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, )
194,723
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.GetAgent = channel.unary_unary( '/google.cloud.dialogflow.v2.Agents/GetAgent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.GetAgentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.Agent.FromString, ) self.SearchAgents = channel.unary_unary( '/google.cloud.dialogflow.v2.Agents/SearchAgents', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.SearchAgentsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.SearchAgentsResponse.FromString, ) self.TrainAgent = channel.unary_unary( '/google.cloud.dialogflow.v2.Agents/TrainAgent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.TrainAgentRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.ExportAgent = channel.unary_unary( '/google.cloud.dialogflow.v2.Agents/ExportAgent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.ExportAgentRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.ImportAgent = channel.unary_unary( '/google.cloud.dialogflow.v2.Agents/ImportAgent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.ImportAgentRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.RestoreAgent = channel.unary_unary( '/google.cloud.dialogflow.v2.Agents/RestoreAgent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.RestoreAgentRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, )
194,728
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.ListDocuments = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Documents/ListDocuments', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.ListDocumentsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.ListDocumentsResponse.FromString, ) self.GetDocument = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Documents/GetDocument', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.GetDocumentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.Document.FromString, ) self.CreateDocument = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Documents/CreateDocument', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.CreateDocumentRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.DeleteDocument = channel.unary_unary( '/google.cloud.dialogflow.v2beta1.Documents/DeleteDocument', request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.DeleteDocumentRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, )
194,748
Lists the Documents belonging to a Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
def list_documents(project_id, knowledge_base_id):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.DocumentsClient()
    knowledge_base_path = client.knowledge_base_path(project_id,
                                                     knowledge_base_id)

    print('Documents for Knowledge Id: {}'.format(knowledge_base_id))
    for document in client.list_documents(knowledge_base_path):
        print(' - Display Name: {}'.format(document.display_name))
        print(' - Knowledge ID: {}'.format(document.name))
        print(' - MIME Type: {}'.format(document.mime_type))
        print(' - Knowledge Types:')
        for knowledge_type in document.knowledge_types:
            print('    - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
        print(' - Source: {}\n'.format(document.content_uri))
194,754
Gets a Document. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base. document_id: Id of the Document.
def get_document(project_id, knowledge_base_id, document_id):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.DocumentsClient()
    document_path = client.document_path(project_id, knowledge_base_id,
                                         document_id)

    response = client.get_document(document_path)

    print('Got Document:')
    print(' - Display Name: {}'.format(response.display_name))
    print(' - Knowledge ID: {}'.format(response.name))
    print(' - MIME Type: {}'.format(response.mime_type))
    print(' - Knowledge Types:')
    for knowledge_type in response.knowledge_types:
        print('    - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
    print(' - Source: {}\n'.format(response.content_uri))
194,756
Deletes a Document. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base. document_id: Id of the Document.
def delete_document(project_id, knowledge_base_id, document_id):
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.DocumentsClient()
    document_path = client.document_path(project_id, knowledge_base_id,
                                         document_id)

    response = client.delete_document(document_path)
    print('operation running:\n {}'.format(response.operation))
    print('Waiting for results...')
    print('Done.\n {}'.format(response.result()))
194,757
Add indentation to text between the tags by the given indentation level. {% indentby <indent_level> [if <statement>] %} ... {% endindentby %} Arguments: indent_level -- number of spaces to indent the text with. statement -- only apply indent_level if the boolean statement evaluates to True.
def indentby(parser, token):
    args = token.split_contents()
    largs = len(args)
    if largs not in (2, 4):
        raise template.TemplateSyntaxError(
            "indentby tag requires 1 or 3 arguments")
    indent_level = args[1]
    if_statement = None
    if largs == 4:
        if_statement = args[3]
    nodelist = parser.parse(('endindentby',))
    parser.delete_first_token()
    return IndentByNode(nodelist, indent_level, if_statement)
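A hedged rendering sketch, assuming the tag is registered in a hypothetical mytags template library:

from django.template import Context, Template

t = Template('{% load mytags %}'
             '{% indentby 4 %}<p>hello</p>{% endindentby %}')
print(t.render(Context({})))  # the wrapped block comes back indented four spaces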
196,977
Helper method for ShortestPath, to reconstruct path. Arguments: came_from: a dictionary mapping Point to (Point, Poly) tuples. This dictionary keeps track of the previous neighbor to a node, and the edge used to get from the previous neighbor to the node. current_node: the current Point in the path. Returns: A Poly that represents the path through the graph from the start of the search to current_node.
def _ReconstructPath(self, came_from, current_node):
    if current_node in came_from:
        (previous_node, previous_edge) = came_from[current_node]
        if previous_edge.GetPoint(0) == current_node:
            previous_edge = previous_edge.Reversed()
        p = self._ReconstructPath(came_from, previous_node)
        return Poly.MergePolys([p, previous_edge], merge_point_threshold=0)
    else:
        return Poly([], '')
197,260
Add columns to table if they are not already there. Args: table: table name as a string columns: an iterable of column names
def AddTableColumns(self, table, columns):
    table_columns = self._table_columns.setdefault(table, [])
    for attr in columns:
        if attr not in table_columns:
            table_columns.append(attr)
197,265
Return a list of tuples (date, [period1, period2, ...]). For each date in the range [date_start, date_end) make list of each ServicePeriod object which is active. Args: date_start: The first date in the list, a date object date_end: The first date after the list, a date object Returns: A list of tuples. Each tuple contains a date object and a list of zero or more ServicePeriod objects.
def GetServicePeriodsActiveEachDate(self, date_start, date_end):
    date_it = date_start
    one_day = datetime.timedelta(days=1)
    date_service_period_list = []
    while date_it < date_end:
        periods_today = []
        date_it_string = date_it.strftime("%Y%m%d")
        for service in self.GetServicePeriodList():
            if service.IsActiveOn(date_it_string, date_it):
                periods_today.append(service)
        date_service_period_list.append((date_it, periods_today))
        date_it += one_day
    return date_service_period_list
197,280
Add a stop to this schedule. Args: lat: Latitude of the stop as a float or string lng: Longitude of the stop as a float or string name: Name of the stop, which will appear in the feed stop_id: stop_id of the stop or None, in which case a unique id is picked Returns: A new Stop object
def AddStop(self, lat, lng, name, stop_id=None):
    if stop_id is None:
        stop_id = util.FindUniqueId(self.stops)
    stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng,
                                   name=name)
    self.AddStopObject(stop)
    return stop
197,281
Add a route to this schedule. Args: short_name: Short name of the route, such as "71L" long_name: Full name of the route, such as "NW 21st Ave/St Helens Rd" route_type: A type such as "Tram", "Subway" or "Bus" route_id: id of the route or None, in which case a unique id is picked Returns: A new Route object
def AddRoute(self, short_name, long_name, route_type, route_id=None):
    if route_id is None:
        route_id = util.FindUniqueId(self.routes)
    route = self._gtfs_factory.Route(short_name=short_name,
                                     long_name=long_name,
                                     route_type=route_type,
                                     route_id=route_id)
    route.agency_id = self.GetDefaultAgency().agency_id
    self.AddRouteObject(route)
    return route
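A short sketch using both helpers on an existing Schedule instance; the coordinates and names are arbitrary:

stop = schedule.AddStop(lat=36.425288, lng=-117.133162, name='Demo Stop')
route = schedule.AddRoute(short_name='71L',
                          long_name='NW 21st Ave/St Helens Rd',
                          route_type='Bus')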
197,283
Output this schedule as a Google Transit Feed in file_name. Args: file: path of new feed file (a string) or a file-like object Returns: None
def WriteGoogleTransitFeed(self, file):
    # Compression type given when adding each file
    archive = zipfile.ZipFile(file, 'w')

    if 'agency' in self._table_columns:
        agency_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(agency_string)
        columns = self.GetTableColumns('agency')
        writer.writerow(columns)
        for a in self._agencies.values():
            writer.writerow([util.EncodeUnicode(a[c]) for c in columns])
        self._WriteArchiveString(archive, 'agency.txt', agency_string)

    if 'feed_info' in self._table_columns:
        feed_info_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(feed_info_string)
        columns = self.GetTableColumns('feed_info')
        writer.writerow(columns)
        writer.writerow([util.EncodeUnicode(self.feed_info[c])
                         for c in columns])
        self._WriteArchiveString(archive, 'feed_info.txt', feed_info_string)

    calendar_dates_string = StringIO.StringIO()
    writer = util.CsvUnicodeWriter(calendar_dates_string)
    writer.writerow(
        self._gtfs_factory.ServicePeriod._FIELD_NAMES_CALENDAR_DATES)
    has_data = False
    for period in self.service_periods.values():
        for row in period.GenerateCalendarDatesFieldValuesTuples():
            has_data = True
            writer.writerow(row)
    wrote_calendar_dates = False
    if has_data:
        wrote_calendar_dates = True
        self._WriteArchiveString(archive, 'calendar_dates.txt',
                                 calendar_dates_string)

    calendar_string = StringIO.StringIO()
    writer = util.CsvUnicodeWriter(calendar_string)
    writer.writerow(self._gtfs_factory.ServicePeriod._FIELD_NAMES)
    has_data = False
    for s in self.service_periods.values():
        row = s.GetCalendarFieldValuesTuple()
        if row:
            has_data = True
            writer.writerow(row)
    if has_data or not wrote_calendar_dates:
        self._WriteArchiveString(archive, 'calendar.txt', calendar_string)

    if 'stops' in self._table_columns:
        stop_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(stop_string)
        columns = self.GetTableColumns('stops')
        writer.writerow(columns)
        for s in self.stops.values():
            writer.writerow([util.EncodeUnicode(s[c]) for c in columns])
        self._WriteArchiveString(archive, 'stops.txt', stop_string)

    if 'routes' in self._table_columns:
        route_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(route_string)
        columns = self.GetTableColumns('routes')
        writer.writerow(columns)
        for r in self.routes.values():
            writer.writerow([util.EncodeUnicode(r[c]) for c in columns])
        self._WriteArchiveString(archive, 'routes.txt', route_string)

    if 'trips' in self._table_columns:
        trips_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(trips_string)
        columns = self.GetTableColumns('trips')
        writer.writerow(columns)
        for t in self.trips.values():
            writer.writerow([util.EncodeUnicode(t[c]) for c in columns])
        self._WriteArchiveString(archive, 'trips.txt', trips_string)

    # write frequencies.txt (if applicable)
    headway_rows = []
    for trip in self.GetTripList():
        headway_rows += trip.GetFrequencyOutputTuples()
    if headway_rows:
        headway_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(headway_string)
        writer.writerow(self._gtfs_factory.Frequency._FIELD_NAMES)
        writer.writerows(headway_rows)
        self._WriteArchiveString(archive, 'frequencies.txt', headway_string)

    # write fares (if applicable)
    if self.GetFareAttributeList():
        fare_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(fare_string)
        writer.writerow(self._gtfs_factory.FareAttribute._FIELD_NAMES)
        writer.writerows(
            f.GetFieldValuesTuple() for f in self.GetFareAttributeList())
        self._WriteArchiveString(archive, 'fare_attributes.txt', fare_string)

    # write fare rules (if applicable)
    rule_rows = []
    for fare in self.GetFareAttributeList():
        for rule in fare.GetFareRuleList():
            rule_rows.append(rule.GetFieldValuesTuple())
    if rule_rows:
        rule_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(rule_string)
        writer.writerow(self._gtfs_factory.FareRule._FIELD_NAMES)
        writer.writerows(rule_rows)
        self._WriteArchiveString(archive, 'fare_rules.txt', rule_string)

    stop_times_string = StringIO.StringIO()
    writer = util.CsvUnicodeWriter(stop_times_string)
    writer.writerow(self._gtfs_factory.StopTime._FIELD_NAMES)
    for t in self.trips.values():
        writer.writerows(t._GenerateStopTimesTuples())
    self._WriteArchiveString(archive, 'stop_times.txt', stop_times_string)

    # write shapes (if applicable)
    shape_rows = []
    for shape in self.GetShapeList():
        seq = 1
        for (lat, lon, dist) in shape.points:
            shape_rows.append((shape.shape_id, lat, lon, seq, dist))
            seq += 1
    if shape_rows:
        shape_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(shape_string)
        writer.writerow(self._gtfs_factory.Shape._FIELD_NAMES)
        writer.writerows(shape_rows)
        self._WriteArchiveString(archive, 'shapes.txt', shape_string)

    if 'transfers' in self._table_columns:
        transfer_string = StringIO.StringIO()
        writer = util.CsvUnicodeWriter(transfer_string)
        columns = self.GetTableColumns('transfers')
        writer.writerow(columns)
        for t in self.GetTransferIter():
            writer.writerow([util.EncodeUnicode(t[c]) for c in columns])
        self._WriteArchiveString(archive, 'transfers.txt', transfer_string)

    archive.close()
197,296
Return a list of (date object, number of trips, number of departures). The list is generated for dates in the range [date_start, date_end). Args: date_start: The first date in the list, a date object date_end: The first date after the list, a date object Returns: a list of (date object, number of trips, number of departures) tuples
def GenerateDateTripsDeparturesList(self, date_start, date_end):
    service_id_to_trips = defaultdict(lambda: 0)
    service_id_to_departures = defaultdict(lambda: 0)
    for trip in self.GetTripList():
        headway_start_times = trip.GetFrequencyStartTimes()
        if headway_start_times:
            trip_runs = len(headway_start_times)
        else:
            trip_runs = 1
        service_id_to_trips[trip.service_id] += trip_runs
        service_id_to_departures[trip.service_id] += (
            (trip.GetCountStopTimes() - 1) * trip_runs)
    date_services = self.GetServicePeriodsActiveEachDate(date_start,
                                                         date_end)
    date_trips = []
    for date, services in date_services:
        day_trips = sum(service_id_to_trips[s.service_id] for s in services)
        day_departures = sum(
            service_id_to_departures[s.service_id] for s in services)
        date_trips.append((date, day_trips, day_departures))
    return date_trips
197,297
Validate the start and expiration dates of the feed. Issues a warning if the feed only starts in the future, or if it expires within 60 days. Args: problems: the problem reporter object first_date: a date object representing the first day the feed is active last_date: a date object representing the last day the feed is active first_date_origin: where first_date comes from, used when reporting problems last_date_origin: where last_date comes from, used when reporting problems today: a date object representing the date the validation is being run on Returns: None
def ValidateFeedStartAndExpirationDates(self, problems, first_date,
                                        last_date, first_date_origin,
                                        last_date_origin, today):
    warning_cutoff = today + datetime.timedelta(days=60)
    if last_date < warning_cutoff:
        problems.ExpirationDate(time.mktime(last_date.timetuple()),
                                last_date_origin)
    if first_date > today:
        problems.FutureService(time.mktime(first_date.timetuple()),
                               first_date_origin)
197,300
Main interface for drawing the marey graph. If called without arguments, the data generated in the previous call will be used. New decorators can be added between calls. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] Returns: # A string that contains an svg/xml web page with a marey graph. ' <svg width="1440" height="520" version="1.1" ... '
def Draw(self, stoplist=None, triplist=None, height=520):
    output = str()
    if not triplist:
        triplist = []
    if not stoplist:
        stoplist = []
    if not self._cache or triplist or stoplist:
        self._gheight = height
        self._tlist = triplist
        self._slist = stoplist
        self._decorators = []
        self._stations = self._BuildStations(stoplist)
        self._cache = "%s %s %s %s" % (self._DrawBox(),
                                       self._DrawHours(),
                                       self._DrawStations(),
                                       self._DrawTrips(triplist))
    output = "%s %s %s %s" % (self._DrawHeader(),
                              self._cache,
                              self._DrawDecorators(),
                              self._DrawFooter())
    return output
197,321
Dispatches the best algorithm for calculating station line position. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X]
def _BuildStations(self, stoplist):
    dists = self._EuclidianDistances(stoplist)
    stations = self._CalculateYLines(dists)
    return stations
197,324
Calculate Euclidean distances between stops. Uses the stoplist's longitudes/latitudes to approximate distances between stations and build a list with y-coordinates for the horizontal lines in the graph. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] Returns: # One integer for each pair of stations # indicating the approximate distance [0,33,140, ... ,X]
def _EuclidianDistances(self, slist):
    e_dists2 = [transitfeed.ApproximateDistanceBetweenStops(stop, tail)
                for (stop, tail) in itertools.izip(slist, slist[1:])]
    return e_dists2
197,325
Builds a list with y-coordinates for the horizontal lines in the graph. Args: # One integer for each pair of stations # indicating the approximate distance dists: [0,33,140, ... ,X] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X]
def _CalculateYLines(self, dists):
    tot_dist = sum(dists)
    if tot_dist > 0:
        pixel_dist = [float(d * (self._gheight - 20)) / tot_dist
                      for d in dists]
        pixel_grid = [0] + [int(pd + sum(pixel_dist[0:i]))
                            for i, pd in enumerate(pixel_dist)]
    else:
        pixel_grid = []
    return pixel_grid
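A small worked example of the scaling, assuming a 120-pixel graph height (20 pixels are reserved, leaving 100):

dists = [25, 75]   # distances between consecutive stations
gheight = 120
pixel_dist = [float(d * (gheight - 20)) / sum(dists) for d in dists]
grid = [0] + [int(pd + sum(pixel_dist[0:i]))
              for i, pd in enumerate(pixel_dist)]
# grid == [0, 25, 100]: station y-coordinates, proportional to distance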
197,326
Calculate distances and plot stops. Uses a timetable to approximate distances between stations. Args: # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] # (Optional) index into triplist of the trip preferred for timetable calculation index: 3 Returns: # One integer for each pair of stations # indicating the approximate distance [0,33,140, ... ,X]
def _TravelTimes(self, triplist, index=0):
    def DistanceInTravelTime(dep_secs, arr_secs):
        t_dist = arr_secs - dep_secs
        if t_dist < 0:
            t_dist = self._DUMMY_SEPARATOR  # min separation
        return t_dist
    if not triplist:
        return []
    if 0 < index < len(triplist):
        trip = triplist[index]
    else:
        trip = triplist[0]
    t_dists2 = [DistanceInTravelTime(stop[3], tail[2])
                for (stop, tail) in itertools.izip(trip.GetTimeStops(),
                                                   trip.GetTimeStops()[1:])]
    return t_dists2
197,327
Generates svg polylines for each transit trip. Args: # Class Trip is defined in transitfeed.py [Trip, Trip, ...] Returns: # A string containing a polyline tag for each trip ' <polyline class="T" stroke="#336633" points="433,0 ...'
def _DrawTrips(self,triplist,colpar=""): stations = [] if not self._stations and triplist: self._stations = self._CalculateYLines(self._TravelTimes(triplist)) if not self._stations: self._AddWarning("Failed to use traveltimes for graph") self._stations = self._CalculateYLines(self._Uniform(triplist)) if not self._stations: self._AddWarning("Failed to calculate station distances") return stations = self._stations tmpstrs = [] servlist = [] for t in triplist: if not colpar: if t.service_id not in servlist: servlist.append(t.service_id) shade = int(servlist.index(t.service_id) * (200/len(servlist))+55) color = "#00%s00" % hex(shade)[2:4] else: color=colpar start_offsets = [0] first_stop = t.GetTimeStops()[0] for j,freq_offset in enumerate(start_offsets): if j>0 and not colpar: color="purple" scriptcall = 'onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (t.trip_id, t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime())) tmpstrhead = '<polyline class="T" id="%s" stroke="%s" %s points="' % \ (str(t.trip_id),color, scriptcall) tmpstrs.append(tmpstrhead) for i, s in enumerate(t.GetTimeStops()): arr_t = s[0] dep_t = s[1] if arr_t is None or dep_t is None: continue arr_x = int(arr_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset dep_x = int(dep_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset tmpstrs.append("%s,%s " % (int(arr_x+20), int(stations[i]+20))) tmpstrs.append("%s,%s " % (int(dep_x+20), int(stations[i]+20))) tmpstrs.append('" />') return "".join(tmpstrs)
197,328
Generates svg with a horizontal line for each station/stop. Args: color: an HTML color string for the station lines (default "#aaa"). Returns: # A string containing a polyline tag for each stop ' <polyline class="Station" stroke="#336633" points="20,0 ...'
def _DrawStations(self, color="#aaa"): stations=self._stations tmpstrs = [] for y in stations: tmpstrs.append(' <polyline class="Station" stroke="%s" \ points="%s,%s, %s,%s" />' %(color,20,20+y+.5,self._gwidth+20,20+y+.5)) return "".join(tmpstrs)
197,330
Flushes existing decorations and highlights the given station-line. Args: # Integer, index of stop to be highlighted. index: 4 # An optional string with a html color code color: "#fff"
def AddStationDecoration(self, index, color="#f00"): tmpstr = str() num_stations = len(self._stations) ind = int(index) if self._stations: if 0<ind<num_stations: y = self._stations[ind] tmpstr = '<polyline class="Dec" stroke="%s" points="%s,%s,%s,%s" />' \ % (color, 20, 20+y+.5, self._gwidth+20, 20+y+.5) self._decorators.append(tmpstr)
197,332
Flushes existing decorations and highlights the given trips. Args: # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] # An optional string with a html color code color: "#fff"
def AddTripDecoration(self, triplist, color="#f00"): tmpstr = self._DrawTrips(triplist,color) self._decorators.append(tmpstr)
197,333
Changes the zoom of the graph manually. 1.0 is the original canvas size. Args: # float value between 0.0 and 5.0 newfactor: 0.7
def ChangeScaleFactor(self, newfactor):
    if 0 < float(newfactor) < self._MAX_ZOOM:
        self._zoomfactor = newfactor
197,334
Create a KML Folder element. Args: parent: The parent ElementTree.Element instance. name: The folder name as a string. visible: Whether the folder is initially visible or not. description: A description string or None. Returns: The folder ElementTree.Element instance.
def _CreateFolder(self, parent, name, visible=True, description=None):
    folder = ET.SubElement(parent, 'Folder')
    name_tag = ET.SubElement(folder, 'name')
    name_tag.text = name
    if description is not None:
        desc_tag = ET.SubElement(folder, 'description')
        desc_tag.text = description
    if not visible:
        visibility = ET.SubElement(folder, 'visibility')
        visibility.text = '0'
    return folder
197,347
Create a KML Style element for the route. The style sets the line colour if the route colour is specified. The line thickness is set depending on the vehicle type. Args: doc: The KML Document ElementTree.Element instance. route: The transitfeed.Route to create the style for. Returns: The id of the style as a string.
def _CreateStyleForRoute(self, doc, route):
    style_id = 'route_%s' % route.route_id
    style = ET.SubElement(doc, 'Style', {'id': style_id})
    linestyle = ET.SubElement(style, 'LineStyle')
    width = ET.SubElement(linestyle, 'width')
    type_to_width = {0: '3',  # Tram
                     1: '3',  # Subway
                     2: '5',  # Rail
                     3: '1'}  # Bus
    width.text = type_to_width.get(route.route_type, '1')
    if route.route_color:
        color = ET.SubElement(linestyle, 'color')
        red = route.route_color[0:2].lower()
        green = route.route_color[2:4].lower()
        blue = route.route_color[4:6].lower()
        color.text = 'ff%s%s%s' % (blue, green, red)
    return style_id
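KML expects colors as aabbggrr (alpha, blue, green, red), which is why the channels are reassembled in reverse order; a quick sketch:

route_color = 'FF0000'  # GTFS route_color is rrggbb: red
red, green, blue = route_color[0:2], route_color[2:4], route_color[4:6]
kml_color = 'ff%s%s%s' % (blue.lower(), green.lower(), red.lower())
# kml_color == 'ff0000ff': fully opaque red in KML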
197,348
Create a KML Placemark element. Args: parent: The parent ElementTree.Element instance. name: The placemark name as a string. style_id: If not None, the id of a style to use for the placemark. visible: Whether the placemark is initially visible or not. description: A description string or None. Returns: The placemark ElementTree.Element instance.
def _CreatePlacemark(self, parent, name, style_id=None, visible=True,
                     description=None):
    placemark = ET.SubElement(parent, 'Placemark')
    placemark_name = ET.SubElement(placemark, 'name')
    placemark_name.text = name
    if description is not None:
        desc_tag = ET.SubElement(placemark, 'description')
        desc_tag.text = description
    if style_id is not None:
        styleurl = ET.SubElement(placemark, 'styleUrl')
        styleurl.text = '#%s' % style_id
    if not visible:
        visibility = ET.SubElement(placemark, 'visibility')
        visibility.text = '0'
    return placemark
197,349
Create a KML LineString element. The points of the string are given in coordinate_list. Every element of coordinate_list should be one of a tuple (longitude, latitude) or a tuple (longitude, latitude, altitude). Args: parent: The parent ElementTree.Element instance. coordinate_list: The list of coordinates. Returns: The LineString ElementTree.Element instance or None if coordinate_list is empty.
def _CreateLineString(self, parent, coordinate_list):
    if not coordinate_list:
        return None
    linestring = ET.SubElement(parent, 'LineString')
    tessellate = ET.SubElement(linestring, 'tessellate')
    tessellate.text = '1'
    if len(coordinate_list[0]) == 3:
        altitude_mode = ET.SubElement(linestring, 'altitudeMode')
        altitude_mode.text = 'absolute'
    coordinates = ET.SubElement(linestring, 'coordinates')
    if len(coordinate_list[0]) == 3:
        coordinate_str_list = ['%f,%f,%f' % t for t in coordinate_list]
    else:
        coordinate_str_list = ['%f,%f' % t for t in coordinate_list]
    coordinates.text = ' '.join(coordinate_str_list)
    return linestring
197,350
Create a KML LineString using coordinates from a shape. Args: parent: The parent ElementTree.Element instance. shape: The transitfeed.Shape instance. Returns: The LineString ElementTree.Element instance or None if coordinate_list is empty.
def _CreateLineStringForShape(self, parent, shape):
    coordinate_list = [(longitude, latitude) for
                       (latitude, longitude, distance) in shape.points]
    return self._CreateLineString(parent, coordinate_list)
197,351
Create a KML Folder containing placemarks for each stop in the schedule. If there are no stops in the schedule then no folder is created. Args: schedule: The transitfeed.Schedule instance. doc: The KML Document ElementTree.Element instance. Returns: The Folder ElementTree.Element instance or None if there are no stops.
def _CreateStopsFolder(self, schedule, doc):
    if not schedule.GetStopList():
        return None
    stop_folder = self._CreateFolder(doc, 'Stops')
    stop_folder_selection = self._StopFolderSelectionMethod(stop_folder)
    stop_style_selection = self._StopStyleSelectionMethod(doc)
    stops = list(schedule.GetStopList())
    stops.sort(key=lambda x: x.stop_name)
    for stop in stops:
        (folder, pathway_folder) = stop_folder_selection(stop)
        (style_id, pathway_style_id) = stop_style_selection(stop)
        self._CreateStopPlacemark(folder, stop, style_id)
        if (self.show_stop_hierarchy and
            stop.location_type != transitfeed.Stop.LOCATION_TYPE_STATION and
            stop.parent_station and stop.parent_station in schedule.stops):
            placemark = self._CreatePlacemark(
                pathway_folder, stop.stop_name, pathway_style_id)
            parent_station = schedule.stops[stop.parent_station]
            coordinates = [(stop.stop_lon, stop.stop_lat),
                           (parent_station.stop_lon,
                            parent_station.stop_lat)]
            self._CreateLineString(placemark, coordinates)
    return stop_folder
197,352
Creates a new stop <Placemark/> element. Args: stop_folder: the KML folder the placemark will be added to. stop: the actual Stop to create a placemark for. style_id: optional argument indicating a style id to add to the placemark.
def _CreateStopPlacemark(self, stop_folder, stop, style_id):
    desc_items = []
    desc_items.append("Stop id: %s" % stop.stop_id)
    if stop.stop_desc:
        desc_items.append(stop.stop_desc)
    if stop.stop_url:
        desc_items.append('Stop info page: <a href="%s">%s</a>' % (
            stop.stop_url, stop.stop_url))
    description = '<br/>'.join(desc_items) or None
    placemark = self._CreatePlacemark(stop_folder, stop.stop_name,
                                      description=description,
                                      style_id=style_id)
    point = ET.SubElement(placemark, 'Point')
    coordinates = ET.SubElement(point, 'coordinates')
    coordinates.text = '%.6f,%.6f' % (stop.stop_lon, stop.stop_lat)
197,356
Create a KML Folder containing all the trips in the route. The folder contains a placemark for each of these trips. If there are no trips in the route, no folder is created and None is returned. Args: parent: The parent ElementTree.Element instance. route: The transitfeed.Route instance. style_id: A style id string for the placemarks or None. Returns: The Folder ElementTree.Element instance or None.
def _CreateRouteTripsFolder(self, parent, route, style_id=None,
                            schedule=None):
    if not route.trips:
        return None
    trips = list(route.trips)
    trips.sort(key=lambda x: x.trip_id)
    trips_folder = self._CreateFolder(parent, 'Trips', visible=False)
    for trip in trips:
        if (self.date_filter and
            not trip.service_period.IsActiveOn(self.date_filter)):
            continue
        if trip.trip_headsign:
            description = 'Headsign: %s' % trip.trip_headsign
        else:
            description = None
        coordinate_list = []
        for secs, stoptime, tp in trip.GetTimeInterpolatedStops():
            if self.altitude_per_sec > 0:
                coordinate_list.append((stoptime.stop.stop_lon,
                                        stoptime.stop.stop_lat,
                                        (secs - 3600 * 4) *
                                        self.altitude_per_sec))
            else:
                coordinate_list.append((stoptime.stop.stop_lon,
                                        stoptime.stop.stop_lat))
        placemark = self._CreatePlacemark(trips_folder,
                                          trip.trip_id,
                                          style_id=style_id,
                                          visible=False,
                                          description=description)
        self._CreateLineString(placemark, coordinate_list)
    return trips_folder
197,359
Create a KML Folder containing all the shapes in a schedule. The folder contains a placemark for each shape. If there are no shapes in the schedule then the folder is not created and None is returned. Args: schedule: The transitfeed.Schedule instance. doc: The KML Document ElementTree.Element instance. Returns: The Folder ElementTree.Element instance or None.
def _CreateShapesFolder(self, schedule, doc):
    if not schedule.GetShapeList():
        return None
    shapes_folder = self._CreateFolder(doc, 'Shapes')
    shapes = list(schedule.GetShapeList())
    shapes.sort(key=lambda x: x.shape_id)
    for shape in shapes:
        placemark = self._CreatePlacemark(shapes_folder, shape.shape_id)
        self._CreateLineStringForShape(placemark, shape)
        if self.shape_points:
            self._CreateShapePointFolder(shapes_folder, shape)
    return shapes_folder
197,361
Create a KML Folder containing all the shape points in a shape.

The folder contains placemarks for each shape point.

Args:
  shapes_folder: A KML Shape Folder ElementTree.Element instance
  shape: The shape to plot.

Returns:
  The Folder ElementTree.Element instance or None.
def _CreateShapePointFolder(self, shapes_folder, shape):
  folder_name = shape.shape_id + ' Shape Points'
  folder = self._CreateFolder(shapes_folder, folder_name, visible=False)
  for (index, (lat, lon, dist)) in enumerate(shape.points):
    placemark = self._CreatePlacemark(folder, str(index + 1))
    point = ET.SubElement(placemark, 'Point')
    coordinates = ET.SubElement(point, 'coordinates')
    coordinates.text = '%.6f,%.6f' % (lon, lat)
  return folder
197,362
Writes out a feed as KML.

Args:
  schedule: A transitfeed.Schedule object containing the feed to write.
  output_file: The name of the output KML file, or file object to use.
def Write(self, schedule, output_file):
  # Generate the DOM to write
  root = ET.Element('kml')
  root.attrib['xmlns'] = 'http://earth.google.com/kml/2.1'
  doc = ET.SubElement(root, 'Document')
  open_tag = ET.SubElement(doc, 'open')
  open_tag.text = '1'
  self._CreateStopsFolder(schedule, doc)
  if self.split_routes:
    route_types = set()
    for route in schedule.GetRouteList():
      route_types.add(route.route_type)
    route_types = list(route_types)
    route_types.sort()
    for route_type in route_types:
      self._CreateRoutesFolder(schedule, doc, route_type)
  else:
    self._CreateRoutesFolder(schedule, doc)
  self._CreateShapesFolder(schedule, doc)

  # Make sure we pretty-print
  self._SetIndentation(root)

  # Now write the output
  if isinstance(output_file, file):
    output = output_file
  else:
    output = open(output_file, 'w')
  # Write the XML declaration first, then the serialized tree. The original
  # call was missing its argument; this string is the conventional one.
  output.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
  ET.ElementTree(root).write(output, 'utf-8')
197,363
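A hedged usage sketch; it assumes the writer class is named KMLWriter and that the options referenced in Write() (split_routes and friends) are plain instance attributes, which may differ in detail:

import transitfeed
# from kmlwriter import KMLWriter  # assumed module/class name

schedule = transitfeed.Loader('google_transit.zip').Load()  # hypothetical path
writer = KMLWriter()
writer.split_routes = True         # one Routes folder per route_type
writer.show_stop_hierarchy = False
writer.Write(schedule, 'feed.kml')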
Initialize a new Stop object.

Args:
  field_dict: A dictionary mapping attribute name to unicode string
  lat: a float, ignored when field_dict is present
  lng: a float, ignored when field_dict is present
  name: a string, ignored when field_dict is present
  stop_id: a string, ignored when field_dict is present
  stop_code: a string, ignored when field_dict is present
def __init__(self, lat=None, lng=None, name=None, stop_id=None,
             field_dict=None, stop_code=None):
  self._schedule = None
  if field_dict:
    if isinstance(field_dict, self.__class__):
      # Special case so that we don't need to re-parse the attributes to
      # native types; iteritems returns all attributes that don't start
      # with _.
      for k, v in field_dict.iteritems():
        self.__dict__[k] = v
    else:
      self.__dict__.update(field_dict)
  else:
    if lat is not None:
      self.stop_lat = lat
    if lng is not None:
      self.stop_lon = lng
    if name is not None:
      self.stop_name = name
    if stop_id is not None:
      self.stop_id = stop_id
    if stop_code is not None:
      self.stop_code = stop_code
197,364
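Both construction paths in a short sketch (coordinates and ids are illustrative):

import transitfeed

# Keyword construction:
stop = transitfeed.Stop(lat=37.774900, lng=-122.419400,
                        name='Market St', stop_id='S1')

# field_dict construction; the keyword arguments are ignored in this case:
same_stop = transitfeed.Stop(field_dict={'stop_lat': 37.774900,
                                         'stop_lon': -122.419400,
                                         'stop_name': 'Market St',
                                         'stop_id': 'S1'})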
Return a list of (trip, stop_sequence) for all trips visiting this stop.

A trip may appear in the list multiple times, with a different
stop_sequence each time. stop_sequence is an integer.

Args:
  schedule: Deprecated, do not use.
def _GetTripSequence(self, schedule=None):
  if schedule is None:
    schedule = getattr(self, "_schedule", None)
  if schedule is None:
    warnings.warn("No longer supported. _schedule attribute is used to get "
                  "stop_times table", DeprecationWarning)
  cursor = schedule._connection.cursor()
  cursor.execute("SELECT trip_id,stop_sequence FROM stop_times "
                 "WHERE stop_id=?", (self.stop_id, ))
  return [(schedule.GetTrip(row[0]), row[1]) for row in cursor]
197,365
Write the HTML dumping all problems of one type.

Args:
  level_name: string such as "Error" or "Warning"
  class_problist: sequence of tuples (class name,
      BoundedProblemList object)

Returns:
  HTML in a string
def FormatType(self, level_name, class_problist):
  class_problist.sort()
  output = []
  for classname, problist in class_problist:
    output.append('<h4 class="issueHeader"><a name="%s%s">%s</a></h4><ul>\n' %
                  (level_name, classname, UnCamelCase(classname)))
    for e in problist.problems:
      self.FormatException(e, output)
    if problist.dropped_count:
      output.append('<li>and %d more of this type.' %
                    (problist.dropped_count))
    output.append('</ul>\n')
  return ''.join(output)
197,400
Return an HTML table listing the number of problems by class name.

Args:
  level_name: string such as "Error" or "Warning"
  name_to_problist: dict mapping class name to a BoundedProblemList object

Returns:
  HTML in a string
def FormatTypeSummaryTable(self, level_name, name_to_problist):
  output = []
  output.append('<table>')
  for classname in sorted(name_to_problist.keys()):
    problist = name_to_problist[classname]
    human_name = MaybePluralizeWord(problist.count, UnCamelCase(classname))
    output.append('<tr><td>%d</td><td><a href="#%s%s">%s</a></td></tr>\n' %
                  (problist.count, level_name, classname, human_name))
  output.append('</table>\n')
  return ''.join(output)
197,401
Set service as running (or not) on a day of the week.

By default the service does not run on any days.

Args:
  dow: 0 for Monday through 6 for Sunday
  has_service: True if this service operates on dow, False if it does not.

Returns:
  None
def SetDayOfWeekHasService(self, dow, has_service=True):
  assert(dow >= 0 and dow < 7)
  self.day_of_week[dow] = has_service
197,412
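For example, a weekday-only pattern (the service id is illustrative):

period = transitfeed.ServicePeriod(id='WEEKDAY')  # hypothetical id
for dow in range(0, 5):               # 0=Monday through 4=Friday
  period.SetDayOfWeekHasService(dow)  # has_service defaults to True
# Saturday and Sunday stay False, the default.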
Test if this service period has a date exception of the given type.

Args:
  date: a string of form "YYYYMMDD"
  exception_type: the exception type the date should have. Defaults to
      _EXCEPTION_TYPE_ADD

Returns:
  True iff this service has a service exception of the specified type
  at date.
def HasDateExceptionOn(self, date, exception_type=_EXCEPTION_TYPE_ADD):
  if date in self.date_exceptions:
    return exception_type == self.date_exceptions[date][0]
  return False
197,415
Initialize a new Agency object.

Args:
  field_dict: A dictionary mapping attribute name to unicode string
  name: a string, ignored when field_dict is present
  url: a string, ignored when field_dict is present
  timezone: a string, ignored when field_dict is present
  id: a string, ignored when field_dict is present
  email: a string, ignored when field_dict is present
  lang: a string, ignored when field_dict is present
  kwargs: arbitrary keyword arguments may be used to add attributes to the
      new object, ignored when field_dict is present
def __init__(self, name=None, url=None, timezone=None, id=None, email=None,
             field_dict=None, lang=None, **kwargs):
  self._schedule = None
  if not field_dict:
    if name:
      kwargs['agency_name'] = name
    if url:
      kwargs['agency_url'] = url
    if timezone:
      kwargs['agency_timezone'] = timezone
    if id:
      kwargs['agency_id'] = id
    if lang:
      kwargs['agency_lang'] = lang
    if email:
      kwargs['agency_email'] = email
    field_dict = kwargs
  self.__dict__.update(field_dict)
197,429
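A construction sketch; the extra agency_phone keyword illustrates the kwargs path and is not required:

import transitfeed

agency = transitfeed.Agency(name='Demo Transit Authority',
                            url='http://example.com',
                            timezone='America/Los_Angeles',
                            id='DTA')
# Arbitrary GTFS columns can ride along as keyword arguments:
other = transitfeed.Agency(name='Other Agency', url='http://example.com',
                           timezone='UTC', agency_phone='555-0100')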
Check if there is a newer version of transitfeed available.

Args:
  problems: if a new version is available, a NewVersionAvailable problem
      will be added
  latest_version: if specified, override the latest version read from the
      project page
def CheckVersion(problems, latest_version=None):
  if not latest_version:
    timeout = 20
    socket.setdefaulttimeout(timeout)
    request = urllib2.Request(LATEST_RELEASE_VERSION_URL)
    try:
      response = urllib2.urlopen(request)
      content = response.read()
      m = re.search(r'version=(\d+\.\d+\.\d+)', content)
      if m:
        latest_version = m.group(1)
    except urllib2.HTTPError as e:
      description = ('During the new-version check, we failed to reach '
                     'transitfeed server: Reason: %s [%s].' %
                     (e.reason, e.code))
      problems.OtherProblem(description=description, type=errors.TYPE_NOTICE)
      return
    except urllib2.URLError as e:
      description = ('During the new-version check, we failed to reach '
                     'transitfeed server. Reason: %s.' % e.reason)
      problems.OtherProblem(description=description, type=errors.TYPE_NOTICE)
      return

  if not latest_version:
    description = ('During the new-version check, we had trouble parsing the '
                   'contents of %s.' % LATEST_RELEASE_VERSION_URL)
    problems.OtherProblem(description=description, type=errors.TYPE_NOTICE)
    return

  newest_version = _MaxVersion([latest_version, __version__])
  if __version__ != newest_version:
    problems.NewVersionAvailable(newest_version)
197,461
Compute the brightness of an sRGB color using the formula from
http://www.w3.org/TR/2000/WD-AERT-20000426#color-contrast.

Args:
  color: a string of 6 hex digits in the format verified by
      IsValidHexColor().

Returns:
  A floating-point number between 0.0 (black) and 255.0 (white).
def ColorLuminance(color):
  r = int(color[0:2], 16)
  g = int(color[2:4], 16)
  b = int(color[4:6], 16)
  return (299 * r + 587 * g + 114 * b) / 1000.0
197,471
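A quick sanity check of the weighting (299 + 587 + 114 = 1000, so pure white maps to exactly 255.0):

print(ColorLuminance('FFFFFF'))  # 255.0
print(ColorLuminance('000000'))  # 0.0
print(ColorLuminance('FF0000'))  # 76.245 -- red contributes 299/1000 of 255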
Create new object.

Args:
  f: file-like object to wrap
  name: name to use for f. StringIO objects don't have a name attribute.
  problems: a ProblemReporterBase object
def __init__(self, f, name, problems):
  self._f = f
  self._name = name
  self._crlf = 0
  self._crlf_examples = []
  self._lf = 0
  self._lf_examples = []
  self._line_number = 0  # first line will be number 1
  self._problems = problems
197,483
Add a trip to this route.

Args:
  schedule: a Schedule object which will hold the new trip or None to use
      the schedule of this route.
  headsign: headsign of the trip as a string
  service_period: a ServicePeriod object or None to use
      schedule.GetDefaultServicePeriod()
  trip_id: optional trip_id for the new trip

Returns:
  a new Trip object
def AddTrip(self, schedule=None, headsign=None, service_period=None,
            trip_id=None):
  if schedule is None:
    assert self._schedule is not None
    schedule = self._schedule
  if trip_id is None:
    trip_id = util.FindUniqueId(schedule.trips)
  if service_period is None:
    service_period = schedule.GetDefaultServicePeriod()
  trip_class = self.GetGtfsFactory().Trip
  trip_obj = trip_class(route=self, headsign=headsign,
                        service_period=service_period, trip_id=trip_id)
  schedule.AddTripObject(trip_obj)
  return trip_obj
197,514
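A usage sketch, assuming a schedule that already has (or can create) a default service period; names and ids are illustrative:

import transitfeed

schedule = transitfeed.Schedule()
schedule.AddAgency('Demo Agency', 'http://example.com', 'America/Los_Angeles')
route = schedule.AddRoute(short_name='10', long_name='Airport - Downtown',
                          route_type='Bus')
trip = route.AddTrip(headsign='To Airport')  # trip_id generated by FindUniqueId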
Returns the transitfeed class corresponding to a GTFS file.

Args:
  filename: The filename whose class is to be returned

Returns:
  The corresponding class, or None if the filename is not known.

Raises:
  NonStandardMapping if the specified filename has more than one
      corresponding class
def GetGtfsClassByFileName(self, filename):
  if filename not in self._file_mapping:
    return None
  mapping = self._file_mapping[filename]
  class_list = mapping['classes']
  if len(class_list) > 1:
    raise problems.NonStandardMapping(filename)
  else:
    return self._class_mapping[class_list[0]]
197,530
Adds an entry to the list of known filenames.

Args:
  filename: The filename whose mapping is being added.
  new_mapping: A dictionary with the mapping to add. Must contain all
      fields in _REQUIRED_MAPPING_FIELDS.

Raises:
  DuplicateMapping if the filename already exists in the mapping
  InvalidMapping if not all required fields are present
def AddMapping(self, filename, new_mapping):
  for field in self._REQUIRED_MAPPING_FIELDS:
    if field not in new_mapping:
      raise problems.InvalidMapping(field)
  if filename in self.GetKnownFilenames():
    raise problems.DuplicateMapping(filename)
  self._file_mapping[filename] = new_mapping
197,533
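An illustrative call; the mapping keys used here (classes, required, loading_order) are an assumption about _REQUIRED_MAPPING_FIELDS and should be checked against the factory's actual constant:

factory = transitfeed.GetGtfsFactory()
factory.AddMapping('stop_areas.txt',          # hypothetical extension file
                   {'required': False,        # assumed required field names
                    'loading_order': 100,
                    'classes': ['StopArea']})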
Updates an entry in the list of known filenames.

An entry is identified by its filename.

Args:
  filename: The filename whose mapping is to be updated
  mapping_update: A dictionary containing the fields to update and their
      new values.

Raises:
  NonexistentMapping if the filename does not exist in the mapping
def UpdateMapping(self, filename, mapping_update):
  if filename not in self._file_mapping:
    raise problems.NonexistentMapping(filename)
  mapping = self._file_mapping[filename]
  mapping.update(mapping_update)
197,534
Adds an entry to the list of known classes.

Args:
  class_name: A string with the name through which gtfs_class is to be
      made accessible.
  gtfs_class: The class to be added.

Raises:
  DuplicateMapping if class_name is already present in the class mapping.
def AddClass(self, class_name, gtfs_class):
  if class_name in self._class_mapping:
    raise problems.DuplicateMapping(class_name)
  self._class_mapping[class_name] = gtfs_class
197,535
Updates an entry in the list of known classes.

Args:
  class_name: A string with the class name that is to be updated.
  gtfs_class: The new class

Raises:
  NonexistentMapping if there is no class with the specified class_name.
def UpdateClass(self, class_name, gtfs_class):
  if class_name not in self._class_mapping:
    raise problems.NonexistentMapping(class_name)
  self._class_mapping[class_name] = gtfs_class
197,536
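The class mapping is what makes the factory extensible; a sketch using only the methods shown above:

import transitfeed

class MyStop(transitfeed.Stop):  # hypothetical subclass with custom behaviour
  pass

factory = transitfeed.GetGtfsFactory()
factory.UpdateClass('Stop', MyStop)  # replace the registered Stop class
assert factory.GetGtfsClassByFileName('stops.txt') is MyStop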
Removes an entry from the list of known classes.

Args:
  class_name: A string with the class name that is to be removed.

Raises:
  NonexistentMapping if there is no class with the specified class_name.
def RemoveClass(self, class_name):
  if class_name not in self._class_mapping:
    raise problems.NonexistentMapping(class_name)
  del self._class_mapping[class_name]
197,537
Reads the kml file, parses it and updates the Google transit feed object
with the extracted information.

Args:
  filename - kml file name
  feed - an instance of Schedule class to be updated
def Parse(self, filename, feed):
  dom = minidom.parse(filename)
  self.ParseDom(dom, feed)
197,539
Parses the given kml dom tree and updates the Google transit feed object.

Args:
  dom - kml dom tree
  feed - an instance of Schedule class to be updated
def ParseDom(self, dom, feed):
  shape_num = 0
  for node in dom.getElementsByTagName('Placemark'):
    p = self.ParsePlacemark(node)
    if p.IsPoint():
      (lon, lat) = p.coordinates[0]
      m = self.stopNameRe.search(p.name)
      feed.AddStop(lat, lon, m.group(1))
    elif p.IsLine():
      self.ConvertPlacemarkToShape(p, feed)
197,540
Write html to f for up to limit trips between locations.

Args:
  title: String used in html title
  locations: list of (lat, lng) tuples
  limit: maximum number of queries in the html
  f: a file object
def WriteOutput(title, locations, limit, f):
  # The original triple-quoted HTML templates were lost in extraction; these
  # minimal stand-ins preserve the %(title)s substitution via locals().
  output_prefix = """<html><head><title>%(title)s</title></head>
<body><h1>%(title)s</h1><ol>""" % locals()
  output_suffix = """</ol></body></html>""" % locals()
  f.write(transitfeed.EncodeUnicode(output_prefix))
  for source, destination in zip(locations[0:limit], locations[1:limit + 1]):
    f.write(transitfeed.EncodeUnicode(
        "<li>%s\n" % LatLngsToGoogleLink(source, destination)))
  f.write(transitfeed.EncodeUnicode(output_suffix))
197,551
Finds the distance between two points on the Earth's surface.

This is an approximate distance based on assuming that the Earth is a
sphere. The points are specified by their latitude and longitude.

Args:
  pa: the first (lat, lon) point tuple
  pb: the second (lat, lon) point tuple

Returns:
  The distance as a float in metres.
def ApproximateDistanceBetweenPoints(pa, pb):
  alat, alon = pa
  blat, blon = pb
  sa = transitfeed.Stop(lat=alat, lng=alon)
  sb = transitfeed.Stop(lat=blat, lng=blon)
  return transitfeed.ApproximateDistanceBetweenStops(sa, sb)
197,580
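A worked example (coordinates are illustrative; the result is approximate by construction):

sf = (37.7749, -122.4194)       # San Francisco
oakland = (37.8044, -122.2712)  # Oakland
print(ApproximateDistanceBetweenPoints(sf, oakland))  # roughly 13,400 metres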
Initialise the exception object.

Args:
  dataset_merger: The DataSetMerger instance that generated this problem.
  problem_type: The problem severity. This should be set to one of the
      corresponding constants in transitfeed.
  kwargs: Keyword arguments to be saved as instance attributes.
def __init__(self, dataset_merger, problem_type=transitfeed.TYPE_WARNING,
             **kwargs):
  kwargs['type'] = problem_type
  kwargs['entity_type_name'] = dataset_merger.ENTITY_TYPE_NAME
  transitfeed.ExceptionWithContext.__init__(self, None, None, **kwargs)
  self.dataset_merger = dataset_merger
197,583
Generate an HTML table of merge statistics.

Args:
  feed_merger: The FeedMerger instance.

Returns:
  The generated HTML as a string.
def _GenerateStatsTable(self, feed_merger):
  rows = []
  rows.append('<tr><th class="header"/><th class="header">Merged</th>'
              '<th class="header">Copied from old feed</th>'
              '<th class="header">Copied from new feed</th></tr>')
  for merger in feed_merger.GetMergerList():
    stats = merger.GetMergeStats()
    if stats is None:
      continue
    merged, not_merged_a, not_merged_b = stats
    rows.append('<tr><th class="header">%s</th>'
                '<td class="header">%d</td>'
                '<td class="header">%d</td>'
                '<td class="header">%d</td></tr>' %
                (merger.DATASET_NAME, merged, not_merged_a, not_merged_b))
  return '<table>%s</table>' % '\n'.join(rows)
197,588
Generate a listing of the given type of problems.

Args:
  problem_type: The type of problem. This is one of the problem type
      constants from transitfeed.

Returns:
  The generated HTML as a string.
def _GenerateSection(self, problem_type):
  if problem_type == transitfeed.TYPE_WARNING:
    dataset_problems = self._dataset_warnings
    heading = 'Warnings'
  else:
    dataset_problems = self._dataset_errors
    heading = 'Errors'
  if not dataset_problems:
    return ''
  prefix = '<h2 class="issueHeader">%s:</h2>' % heading
  dataset_sections = []
  for dataset_merger, problems in dataset_problems.items():
    dataset_sections.append('<h3>%s</h3><ol>%s</ol>' % (
        dataset_merger.FILE_NAME, '\n'.join(problems)))
  body = '\n'.join(dataset_sections)
  return prefix + body
197,589
Write the HTML output to a file.

Args:
  output_file: The file object that the HTML output will be written to.
  feed_merger: The FeedMerger instance.
  old_feed_path: The path to the old feed file as a string.
  new_feed_path: The path to the new feed file as a string.
  merged_feed_path: The path to the merged feed file as a string. This
      may be None if no merged feed was written.
def WriteOutput(self, output_file, feed_merger,
                old_feed_path, new_feed_path, merged_feed_path):
  if merged_feed_path is None:
    html_merged_feed_path = ''
  else:
    html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % (
        merged_feed_path)

  # The original triple-quoted header/footer templates were lost in
  # extraction; these minimal stand-ins keep the same substitutions.
  html_header = """<html><head><title>Feed Merger Results</title></head>
<body><p>Old feed: <code>%(old_feed_path)s</code></p>
<p>New feed: <code>%(new_feed_path)s</code></p>
%(html_merged_feed_path)s""" % locals()

  html_stats = self._GenerateStatsTable(feed_merger)
  html_summary = self._GenerateSummary()
  html_notices = self._GenerateNotices()
  html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)
  html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)
  html_footer = """<p>Generated by merge.py version %s on %s.</p>
</body></html>""" % (transitfeed.__version__,
                     time.strftime('%B %d, %Y at %I:%M %p %Z'))

  output_file.write(transitfeed.EncodeUnicode(html_header))
  output_file.write(transitfeed.EncodeUnicode(html_stats))
  output_file.write(transitfeed.EncodeUnicode(html_summary))
  output_file.write(transitfeed.EncodeUnicode(html_notices))
  output_file.write(transitfeed.EncodeUnicode(html_errors))
  output_file.write(transitfeed.EncodeUnicode(html_warnings))
  output_file.write(transitfeed.EncodeUnicode(html_footer))
197,592
Initialise.

Args:
  feed_merger: The FeedMerger.
def __init__(self, feed_merger):
  self.feed_merger = feed_merger
  self._num_merged = 0
  self._num_not_merged_a = 0
  self._num_not_merged_b = 0
197,593
Tries to merge two values. The values are required to be identical.

Args:
  a: The first value.
  b: The second value.

Returns:
  The trivially merged value.

Raises:
  MergeError: The values were not identical.
def _MergeIdentical(self, a, b):
  if a != b:
    raise MergeError("values must be identical ('%s' vs '%s')" %
                     (transitfeed.EncodeUnicode(a),
                      transitfeed.EncodeUnicode(b)))
  return b
197,594
Tries to merge two strings.

The strings are required to be the same ignoring case. The second string
is always used as the merged value.

Args:
  a: The first string.
  b: The second string.

Returns:
  The merged string. This is equal to the second string.

Raises:
  MergeError: The strings were not the same ignoring case.
def _MergeIdenticalCaseInsensitive(self, a, b):
  if a.lower() != b.lower():
    raise MergeError("values must be the same (case insensitive) "
                     "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
                                         transitfeed.EncodeUnicode(b)))
  return b
197,595
Tries to merge two values which may be None.

If both values are not None, they are required to be the same and the
merge is trivial. If one of the values is None and the other is not None,
the merge results in the one which is not None. If both are None, the
merge results in None.

Args:
  a: The first value.
  b: The second value.

Returns:
  The merged value.

Raises:
  MergeError: If both values are not None and are not the same.
def _MergeOptional(self, a, b):
  if a and b:
    if a != b:
      raise MergeError("values must be identical if both specified "
                       "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
                                           transitfeed.EncodeUnicode(b)))
  return a or b
197,596
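The three helpers differ only in how much disagreement they tolerate; in sketch form, given some merger instance:

merger._MergeIdentical('A', 'A')                     # -> 'A'
merger._MergeIdentical('A', 'B')                     # raises MergeError
merger._MergeIdenticalCaseInsensitive('bus', 'BUS')  # -> 'BUS' (second wins)
merger._MergeOptional('x', None)                     # -> 'x'
merger._MergeOptional(None, None)                    # -> None
merger._MergeOptional('x', 'y')                      # raises MergeError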
Merge agency ids to the corresponding agency id in the merged schedule.

Args:
  a_agency_id: an agency id from the old schedule
  b_agency_id: an agency id from the new schedule

Returns:
  The agency id of the corresponding merged agency.

Raises:
  MergeError: If a_agency_id and b_agency_id do not correspond to the
      same merged agency.
  KeyError: Either a_agency_id or b_agency_id is not a valid agency id.
def _MergeSameAgency(self, a_agency_id, b_agency_id):
  a_agency_id = (a_agency_id or
                 self.feed_merger.a_schedule.GetDefaultAgency().agency_id)
  b_agency_id = (b_agency_id or
                 self.feed_merger.b_schedule.GetDefaultAgency().agency_id)
  a_agency = self.feed_merger.a_schedule.GetAgency(
      a_agency_id)._migrated_entity
  b_agency = self.feed_merger.b_schedule.GetAgency(
      b_agency_id)._migrated_entity
  if a_agency != b_agency:
    raise MergeError('agency must be the same')
  return a_agency.agency_id
197,597
Report that two entities have the same id but could not be merged.

Args:
  entity_id: The id of the entities.
  reason: A string giving a reason why they could not be merged.
def _ReportSameIdButNotMerged(self, entity_id, reason):
  self.feed_merger.problem_reporter.SameIdButNotMerged(self,
                                                       entity_id,
                                                       reason)
197,602
Check if the schedule has an entity with the given id.

Args:
  schedule: The transitfeed.Schedule instance to look in.
  entity_id: The id of the entity.

Returns:
  True if the schedule has an entity with the id or False if not.
def _HasId(self, schedule, entity_id):
  try:
    self._GetById(schedule, entity_id)
    has = True
  except KeyError:
    has = False
  return has
197,603
Merges two agencies.

To be merged, they are required to have the same id, name, url and
timezone. The remaining language attribute is taken from the new agency.

Args:
  a: The first agency.
  b: The second agency.

Returns:
  The merged agency.

Raises:
  MergeError: The agencies could not be merged.
def _MergeEntities(self, a, b):
  def _MergeAgencyId(a_agency_id, b_agency_id):
    a_agency_id = a_agency_id or None
    b_agency_id = b_agency_id or None
    return self._MergeIdentical(a_agency_id, b_agency_id)

  scheme = {'agency_id': _MergeAgencyId,
            'agency_name': self._MergeIdentical,
            'agency_url': self._MergeIdentical,
            'agency_timezone': self._MergeIdentical}
  return self._SchemedMerge(scheme, a, b)
197,604
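_SchemedMerge itself is not shown here; presumably it applies each field's merge function and propagates MergeError on conflict, roughly like this hypothetical stand-in:

def _SchemedMergeSketch(scheme, a, b):
  # Hypothetical stand-in for _SchemedMerge: merge field by field; any
  # per-field function may raise MergeError on conflicting values.
  merged = {}
  for field, merge_fn in scheme.items():
    merged[field] = merge_fn(getattr(a, field, None), getattr(b, field, None))
  return merged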