Dataset schema, one record per function. For string columns, min/max are character lengths; repository_stars is a numeric value range; lang has a single class, "python".

| column | dtype | min | max |
|---|---|---|---|
| body_hash | string | 64 | 64 |
| body | string | 23 | 109k |
| docstring | string | 1 | 57k |
| path | string | 4 | 198 |
| name | string | 1 | 115 |
| repository_name | string | 7 | 111 |
| repository_stars | float64 | 0 | 191k |
| lang | string (1 class) | — | — |
| body_without_docstring | string | 14 | 108k |
| unified | string | 45 | 133k |
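As a quick orientation to this schema (not part of the dataset itself), here is a minimal pandas sketch for loading an export of these records and splitting the `unified` field back into its code and docstring halves; the filename `functions.parquet` is hypothetical.

```python
import pandas as pd

# Hypothetical local export of the records shown below.
df = pd.read_parquet("functions.parquet")

# Sanity checks against the schema: 64-char SHA-256 hex digests, single language class.
assert df["body_hash"].str.len().eq(64).all()
assert (df["lang"] == "python").all()

def split_unified(unified: str) -> tuple[str, str]:
    """Split a `unified` value into (code, docstring) on the special tokens."""
    code, _, rest = unified.partition("<|docstring|>")
    return code, rest.removesuffix("<|endoftext|>")

code, docstring = split_unified(df.loc[0, "unified"])
```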
---

body_hash: 6d3e131fdc5057dda4e6c398e217429128cb6c967684b892de338d14141b6742

body:

def _get_feedback_inner(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, finished: bool):
    """
    Implement this function if you want to gather information about your game.
    :param state:
    :param action:
    :param reward:
    :param next_state:
    :param finished:
    :return:
    """
    pass

docstring:

Implement this function if you want to gather information about your game.
:param state:
:param action:
:param reward:
:param next_state:
:param finished:
:return:

path: checkers/agents/Agent.py
name: _get_feedback_inner
repository_name: FelixKleineBoesing/CheckersAI
repository_stars: 1
lang: python

body_without_docstring:

def _get_feedback_inner(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, finished: bool):
    """
    Implement this function if you want to gather information about your game.
    :param state:
    :param action:
    :param reward:
    :param next_state:
    :param finished:
    :return:
    """
    pass

unified:

def _get_feedback_inner(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, finished: bool):
    """
    Implement this function if you want to gather information about your game.
    :param state:
    :param action:
    :param reward:
    :param next_state:
    :param finished:
    :return:
    """
    pass<|docstring|>Implement this function if you want to gather information about your game.
:param state:
:param action:
:param reward:
:param next_state:
:param finished:
:return:<|endoftext|>
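For context, a minimal sketch (not from the repository) of how a subclass might use this hook to track training feedback; the class name `RewardLoggingAgent` and its attributes are hypothetical, and only the method signature comes from the record above.

```python
import numpy as np

class RewardLoggingAgent:  # hypothetical subclass of the repo's Agent base class
    def __init__(self):
        self.step_rewards: list = []
        self.finished_games: int = 0

    def _get_feedback_inner(self, state: np.ndarray, action: int, reward: float,
                            next_state: np.ndarray, finished: bool):
        # Accumulate per-step rewards so statistics can be reported later.
        self.step_rewards.append(reward)
        if finished:
            self.finished_games += 1
```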
---

body_hash: 0a8d1534a1307d704414125b2db952c2a54256e637ab531437544228fbce21ad

body:

@abc.abstractmethod
def decision(self, state_space: np.ndarray, action_space: ActionSpace):
    """
    This function must implement a decision based on the action_space and other delivered arguments.
    The return value must be a dictionary with the following keys: "stone_id" and "move_index", which
    indicate the stone and move that should be executed.
    :param action_space:
    :return: np.array(X_From, Y_From, X_To, Y_To)
    """
    pass

docstring:

This function must implement a decision based on the action_space and other delivered arguments.
The return value must be a dictionary with the following keys: "stone_id" and "move_index", which
indicate the stone and move that should be executed.
:param action_space:
:return: np.array(X_From, Y_From, X_To, Y_To)

path: checkers/agents/Agent.py
name: decision
repository_name: FelixKleineBoesing/CheckersAI
repository_stars: 1
lang: python

body_without_docstring:

@abc.abstractmethod
def decision(self, state_space: np.ndarray, action_space: ActionSpace):
    """
    This function must implement a decision based on the action_space and other delivered arguments.
    The return value must be a dictionary with the following keys: "stone_id" and "move_index", which
    indicate the stone and move that should be executed.
    :param action_space:
    :return: np.array(X_From, Y_From, X_To, Y_To)
    """
    pass

unified:

@abc.abstractmethod
def decision(self, state_space: np.ndarray, action_space: ActionSpace):
    """
    This function must implement a decision based on the action_space and other delivered arguments.
    The return value must be a dictionary with the following keys: "stone_id" and "move_index", which
    indicate the stone and move that should be executed.
    :param action_space:
    :return: np.array(X_From, Y_From, X_To, Y_To)
    """
    pass<|docstring|>This function must implement a decision based on the action_space and other delivered arguments.
The return value must be a dictionary with the following keys: "stone_id" and "move_index", which
indicate the stone and move that should be executed.
:param action_space:
:return: np.array(X_From, Y_From, X_To, Y_To)<|endoftext|>
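A hedged sketch of one way a concrete agent could satisfy this contract by choosing uniformly at random; the `ActionSpace` accessors used here (`keys()` and indexing) are assumptions, since that class is not shown in this dump.

```python
import random
import numpy as np

class RandomAgent:  # hypothetical concrete implementation of the Agent base class
    def decision(self, state_space: np.ndarray, action_space) -> dict:
        # Assumed ActionSpace interface: a mapping from stone id to a list of moves.
        stone_id = random.choice(list(action_space.keys()))
        move_index = random.randrange(len(action_space[stone_id]))
        return {"stone_id": stone_id, "move_index": move_index}
```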
---

body_hash: 6d0cea5ea2bc00367f5186d62b3e1127f424344be4b49fb6e2d67ff285f8e035

body:

def produce_scattertext_explorer(corpus, category, category_name=None, not_category_name=None, protocol='https', pmi_threshold_coefficient=DEFAULT_MINIMUM_TERM_FREQUENCY, minimum_term_frequency=DEFAULT_PMI_THRESHOLD_COEFFICIENT, minimum_not_category_term_frequency=0, max_terms=None, filter_unigrams=False, height_in_pixels=None, width_in_pixels=None, max_snippets=None, max_docs_per_category=None, metadata=None, scores=None, x_coords=None, y_coords=None, original_x=None, original_y=None, rescale_x=None, rescale_y=None, singleScoreMode=False, sort_by_dist=False, reverse_sort_scores_for_not_category=True, use_full_doc=False, transform=percentile_alphabetical, jitter=0, gray_zero_scores=False, term_ranker=None, asian_mode=False, match_full_line=False, use_non_text_features=False, show_top_terms=True, show_characteristic=True, word_vec_use_p_vals=False, max_p_val=0.1, p_value_colors=False, term_significance=None, save_svg_button=False, x_label=None, y_label=None, d3_url=None, d3_scale_chromatic_url=None, pmi_filter_thresold=None, alternative_text_field=None, terms_to_include=None, semiotic_square=None, num_terms_semiotic_square=None, not_categories=None, neutral_categories=[], extra_categories=[], show_neutral=False, neutral_category_name=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, x_axis_values_format=None, y_axis_values_format=None, color_func=None, term_scorer=None, show_axes=True, show_axes_and_cross_hairs=False, show_diagonal=False, use_global_scale=False, horizontal_line_y_position=None, vertical_line_x_position=None, show_cross_axes=True, show_extra=False, extra_category_name=None, censor_points=True, center_label_over_points=False, x_axis_labels=None, y_axis_labels=None, topic_model_term_lists=None, topic_model_preview_size=10, metadata_descriptions=None, vertical_lines=None, characteristic_scorer=None, term_colors=None, unified_context=False, show_category_headings=True, highlight_selected_category=False, include_term_category_counts=False, div_name=None, alternative_term_func=None, term_metadata=None, term_metadata_df=None, max_overlapping=(- 1), include_all_contexts=False, show_corpus_stats=True, sort_doc_labels_by_name=False, enable_term_category_description=True, always_jump=True, get_custom_term_html=None, header_names=None, header_sorting_algos=None, ignore_categories=False, d3_color_scale=None, background_labels=None, tooltip_columns=None, tooltip_column_names=None, term_description_columns=None, term_description_column_names=None, term_word_in_term_description='Term', color_column=None, color_score_column=None, label_priority_column=None, text_color_column=None, suppress_text_column=None, background_color=None, left_list_column=None, censor_point_column=None, right_order_column=None, line_coordinates=None, subword_encoding=None, top_terms_length=14, top_terms_left_buffer=0, dont_filter=False, use_offsets=False, return_data=False, return_scatterplot_structure=False):
    """Returns html code of visualization."""
if (singleScoreMode or word_vec_use_p_vals):
d3_color_scale = 'd3.interpolatePurples'
if (singleScoreMode or (not sort_by_dist)):
sort_by_dist = False
else:
sort_by_dist = True
if (term_ranker is None):
term_ranker = termranking.AbsoluteFrequencyRanker
(category_name, not_category_name) = get_category_names(category, category_name, not_categories, not_category_name)
if (not_categories is None):
not_categories = [c for c in corpus.get_categories() if (c != category)]
if term_scorer:
scores = get_term_scorer_scores(category, corpus, neutral_categories, not_categories, show_neutral, term_ranker, term_scorer, use_non_text_features)
if (pmi_filter_thresold is not None):
pmi_threshold_coefficient = pmi_filter_thresold
warnings.warn("The argument name 'pmi_filter_thresold' has been deprecated. Use 'pmi_threshold_coefficient' in its place", DeprecationWarning)
if use_non_text_features:
pmi_threshold_coefficient = 0
scatter_chart_explorer = ScatterChartExplorer(corpus, minimum_term_frequency=minimum_term_frequency, minimum_not_category_term_frequency=minimum_not_category_term_frequency, pmi_threshold_coefficient=pmi_threshold_coefficient, filter_unigrams=filter_unigrams, jitter=jitter, max_terms=max_terms, term_ranker=term_ranker, use_non_text_features=use_non_text_features, term_significance=term_significance, terms_to_include=terms_to_include, dont_filter=dont_filter)
if (((x_coords is None) and (y_coords is not None)) or ((y_coords is None) and (x_coords is not None))):
raise Exception('Both x_coords and y_coords need to be passed or both left blank')
if (x_coords is not None):
scatter_chart_explorer.inject_coordinates(x_coords, y_coords, rescale_x=rescale_x, rescale_y=rescale_y, original_x=original_x, original_y=original_y)
if (topic_model_term_lists is not None):
scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if (metadata_descriptions is not None):
scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
if (term_colors is not None):
scatter_chart_explorer.inject_term_colors(term_colors)
if ((term_metadata_df is not None) and (term_metadata is not None)):
raise Exception('Both term_metadata_df and term_metadata cannot be values which are not None.')
if (term_metadata_df is not None):
scatter_chart_explorer.inject_term_metadata_df(term_metadata_df)
if (term_metadata is not None):
scatter_chart_explorer.inject_term_metadata(term_metadata)
html_base = None
if semiotic_square:
html_base = get_semiotic_square_html(num_terms_semiotic_square, semiotic_square)
scatter_chart_data = scatter_chart_explorer.to_dict(category=category, category_name=category_name, not_category_name=not_category_name, not_categories=not_categories, transform=transform, scores=scores, max_docs_per_category=max_docs_per_category, metadata=(metadata if (not callable(metadata)) else metadata(corpus)), alternative_text_field=alternative_text_field, neutral_category_name=neutral_category_name, extra_category_name=extra_category_name, neutral_categories=neutral_categories, extra_categories=extra_categories, background_scorer=characteristic_scorer, include_term_category_counts=include_term_category_counts, use_offsets=use_offsets)
if (line_coordinates is not None):
scatter_chart_data['line'] = line_coordinates
if return_data:
return scatter_chart_data
if (tooltip_columns is not None):
assert (get_tooltip_content is None)
get_tooltip_content = get_tooltip_js_function(term_metadata_df, tooltip_column_names, tooltip_columns)
if (term_description_columns is not None):
assert (get_custom_term_html is None)
get_custom_term_html = get_custom_term_info_js_function(term_metadata_df, term_description_column_names, term_description_columns, term_word_in_term_description)
if color_column:
assert (color_func is None)
color_func = ('(function(d) {return d.etc["%s"]})' % color_column)
if color_score_column:
assert (color_func is None)
color_func = ('(function(d) {return %s(d.etc["%s"])})' % ((d3_color_scale if (d3_color_scale is not None) else 'd3.interpolateRdYlBu'), color_score_column))
if (header_sorting_algos is not None):
assert ('upper' not in header_sorting_algos)
assert ('lower' not in header_sorting_algos)
if (left_list_column is not None):
assert (term_metadata_df is not None)
assert (left_list_column in term_metadata_df)
header_sorting_algos = {'upper': (((('((a,b) => b.etc["' + left_list_column) + '"] - a.etc["') + left_list_column) + '"])'), 'lower': (((('((a,b) => a.etc["' + left_list_column) + '"] - b.etc["') + left_list_column) + '"])')}
if (right_order_column is not None):
assert (right_order_column in term_metadata_df)
scatterplot_structure = ScatterplotStructure(VizDataAdapter(scatter_chart_data), width_in_pixels=width_in_pixels, height_in_pixels=height_in_pixels, max_snippets=max_snippets, color=d3_color_scale, grey_zero_scores=gray_zero_scores, sort_by_dist=sort_by_dist, reverse_sort_scores_for_not_category=reverse_sort_scores_for_not_category, use_full_doc=use_full_doc, asian_mode=asian_mode, match_full_line=match_full_line, use_non_text_features=use_non_text_features, show_characteristic=show_characteristic, word_vec_use_p_vals=word_vec_use_p_vals, max_p_val=max_p_val, save_svg_button=save_svg_button, p_value_colors=p_value_colors, x_label=x_label, y_label=y_label, show_top_terms=show_top_terms, show_neutral=show_neutral, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=show_axes, horizontal_line_y_position=horizontal_line_y_position, vertical_line_x_position=vertical_line_x_position, show_extra=show_extra, do_censor_points=censor_points, center_label_over_points=center_label_over_points, x_axis_labels=x_axis_labels, y_axis_labels=y_axis_labels, topic_model_preview_size=topic_model_preview_size, vertical_lines=vertical_lines, unified_context=unified_context, show_category_headings=show_category_headings, highlight_selected_category=highlight_selected_category, show_cross_axes=show_cross_axes, div_name=div_name, alternative_term_func=alternative_term_func, include_all_contexts=include_all_contexts, show_axes_and_cross_hairs=show_axes_and_cross_hairs, show_diagonal=show_diagonal, use_global_scale=use_global_scale, x_axis_values_format=x_axis_values_format, y_axis_values_format=y_axis_values_format, max_overlapping=max_overlapping, show_corpus_stats=show_corpus_stats, sort_doc_labels_by_name=sort_doc_labels_by_name, enable_term_category_description=enable_term_category_description, always_jump=always_jump, get_custom_term_html=get_custom_term_html, header_names=header_names, header_sorting_algos=header_sorting_algos, ignore_categories=ignore_categories, background_labels=background_labels, label_priority_column=label_priority_column, text_color_column=text_color_column, suppress_text_column=suppress_text_column, background_color=background_color, censor_point_column=censor_point_column, right_order_column=right_order_column, subword_encoding=subword_encoding, top_terms_length=top_terms_length, top_terms_left_buffer=top_terms_left_buffer)
if return_scatterplot_structure:
return scatterplot_structure
    return BasicHTMLFromScatterplotStructure(scatterplot_structure).to_html(protocol=protocol, d3_url=d3_url, d3_scale_chromatic_url=d3_scale_chromatic_url, html_base=html_base)

docstring:

Returns html code of visualization.
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
Optional, defaults to category name.
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
Optional, defaults to "N(n)ot " + category_name, with the case of the 'n' dependent
on the case of the first letter in category_name.
protocol : str, optional
Protocol to use. Either http or https. Default is https.
pmi_threshold_coefficient : int, optional
Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6
minimum_term_frequency : int, optional
Minimum number of times word needs to appear to make it into visualization.
minimum_not_category_term_frequency : int, optional
If an n-gram does not occur in the category, the minimum number of times it
must be seen to be included. Default is 0.
max_terms : int, optional
Maximum number of terms to include in visualization.
filter_unigrams : bool, optional
Default False, do we filter out unigrams that only occur in one bigram
width_in_pixels : int, optional
Width of viz in pixels, if None, default to JS's choice
height_in_pixels : int, optional
Height of viz in pixels, if None, default to JS's choice
max_snippets : int, optional
Maximum number of snippets to show when term is clicked. If None, all are shown.
max_docs_per_category: int, optional
Maximum number of documents to store per category. If None, by default, all are stored.
metadata : list or function, optional
List of metadata strings that will be included for each document; if a function, it is called on the corpus.
scores : np.array, optional
Array of term scores or None.
x_coords : np.array, optional
Array of term x-axis positions or None. Must be in [0,1].
If present, y_coords must also be present.
y_coords : np.array, optional
Array of term y-axis positions or None. Must be in [0,1].
If present, x_coords must also be present.
original_x : array-like
Original, unscaled x-values. Defaults to x_coords
original_y : array-like
Original, unscaled y-values. Defaults to y_coords
rescale_x : lambda list[0,1]: list[0,1], optional
Array of term x-axis positions or None. Must be in [0,1].
Rescales x-axis after filtering
rescale_y : lambda list[0,1]: list[0,1], optional
Array of term y-axis positions or None. Must be in [0,1].
Rescales y-axis after filtering
singleScoreMode : bool, optional
Label terms based on score vs distance from corner. Good for topic scores. Show only one color.
sort_by_dist: bool, optional
Label terms based on distance from corner. True by default. Negated by singleScoreMode.
reverse_sort_scores_for_not_category: bool, optional
If using a custom score, score the not-category class by
lowest-score-as-most-predictive. Turn this off for word vector
or topic similarity. Default True.
use_full_doc : bool, optional
Use the full document in snippets. False by default.
transform : function, optional
Not recommended for editing. Changes the way terms are ranked. Default is st.Scalers.percentile_alphabetical.
jitter : float, optional
percentage of axis to jitter each point. default is 0.
gray_zero_scores : bool, optional
If True, color points with zero-scores a light shade of grey. False by default.
term_ranker : TermRanker, optional
TermRanker class for determining term frequency ranks.
asian_mode : bool, optional
Use a special Javascript regular expression that's specific to Chinese or Japanese
match_full_line : bool, optional
Make the javascript regex match the full line instead of part of it
use_non_text_features : bool, optional
Show non-bag-of-words features (e.g., Empath) instead of text. False by default.
show_top_terms : bool, default True
Show top terms on the left-hand side of the visualization
show_characteristic: bool, default True
Show characteristic terms on the far left-hand side of the visualization
word_vec_use_p_vals: bool, default False
Sort by harmonic mean of score and distance.
max_p_val : float, default 0.1
If word_vec_use_p_vals, the minimum p val to use.
p_value_colors : bool, default False
Color points differently if p val is above 1-max_p_val, below max_p_val, or
in between.
term_significance : TermSignificance instance or None
Way of getting significance scores. If None, p values will not be added.
save_svg_button : bool, default False
Add a save as SVG button to the page.
x_label : str, default None
Custom x-axis label
y_label : str, default None
Custom y-axis label
d3_url, str, None by default. The url (or path) of d3.
URL of d3, to be inserted into <script src="..."/>. Overrides `protocol`.
By default, this is `DEFAULT_D3_URL` declared in `ScatterplotStructure`.
d3_scale_chromatic_url, str, None by default. Overrides `protocol`.
URL of d3 scale chromatic, to be inserted into <script src="..."/>
By default, this is `DEFAULT_D3_SCALE_CHROMATIC` declared in `ScatterplotStructure`.
pmi_filter_thresold : (DEPRECATED) int, None by default
DEPRECATED. Use pmi_threshold_coefficient instead.
alternative_text_field : str or None, optional
Field from the dataframe used to make the corpus, to display in place of parsed text. Can
only be used if corpus is a ParsedCorpus instance.
terms_to_include : list or None, optional
Whitelist of terms to include in visualization.
semiotic_square : SemioticSquareBase
None by default. SemioticSquare based on corpus. Includes square above visualization.
num_terms_semiotic_square : int
10 by default. Number of terms to show in semiotic square.
Only active if semiotic square is present.
not_categories : list
All categories other than category by default. Documents labeled
with remaining category.
neutral_categories : list
[] by default. Documents labeled neutral.
extra_categories : list
[] by default. Documents labeled extra.
show_neutral : bool
False by default. Show a third column listing contexts in the
neutral categories.
neutral_category_name : str
"Neutral" by default. Only active if show_neutral is True. Name of the neutral
column.
get_tooltip_content : str
Javascript function to control content of tooltip. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string.
x_axis_values : list, default None
Value-labels to show on x-axis. Low, medium, high are defaults.
y_axis_values : list, default None
Value-labels to show on y-axis. Low, medium, high are defaults.
x_axis_values_format : str, default None
d3 format of x-axis values
y_axis_values_format : str, default None
d3 format of y-axis values
color_func : str, default None
Javascript function to control color of a point. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string.
term_scorer : Object, default None
In lieu of scores, object with a get_scores(a,b) function that returns a set of scores,
where a and b are term counts. Scorer optionally has a get_term_freqs function. Also could be a
CorpusBasedTermScorer instance.
show_axes : bool, default True
Show the ticked axes on the plot. If false, show inner axes as a crosshair.
show_axes_and_cross_hairs : bool, default False
Show both peripheral axis labels and cross axes.
show_diagonal : bool, default False
Show a diagonal line leading from the lower-left to the upper-right; only makes
sense to use this if use_global_scale is true.
use_global_scale : bool, default False
Use same scale for both axes
vertical_line_x_position : float, default None
horizontal_line_y_position : float, default None
show_cross_axes : bool, default True
If show_axes is False, do we show cross-axes?
show_extra : bool
False by default. Show a fourth column listing contexts in the
extra categories.
extra_category_name : str, default None
"Extra" by default. Only active if show_neutral is True and show_extra is True. Name
of the extra column.
censor_points : bool, default True
Don't label over points.
center_label_over_points : bool, default False
Center a label over points, or try to find a position near a point that
doesn't overlap anything else.
x_axis_labels: list, default None
List of string value-labels to show at evenly spaced intervals on the x-axis.
Low, medium, high are defaults.
y_axis_labels : list, default None
List of string value-labels to show at evenly spaced intervals on the y-axis.
Low, medium, high are defaults.
topic_model_term_lists : dict default None
Dict of metadata name (str) -> List of string terms in metadata. These will be bolded
in query in context results.
topic_model_preview_size : int default 10
Number of terms in topic model to show as a preview.
metadata_descriptions : dict default None
Dict of metadata name (str) -> str of metadata description. These will be shown when a meta data term is
clicked.
vertical_lines : list default None
List of floats corresponding to points on the x-axis to draw vertical lines
characteristic_scorer : CharacteristicScorer, default None
Used for background scores
term_colors : dict, default None
Dictionary mapping term to color
unified_context : bool, default False
Boolean displays contexts in a single pane as opposed to separate columns.
show_category_headings : bool, default True
Show category headings if unified_context is True.
highlight_selected_category : bool, default False
Highlight selected category if unified_context is True.
include_term_category_counts : bool, default False
Include the termCounts object in the plot definition.
div_name : str, None by default
Give the scatterplot div name a non-default value
alternative_term_func: str, default None
Javascript function which takes a term JSON object and returns a bool. If the return value is true,
execute standard term click pipeline. Ex.: `'(function(termDict) {return true;})'`.
term_metadata : dict, None by default
Dict mapping terms to dictionaries containing additional information which can be used in the color_func
or the get_tooltip_content function. These will appear in termDict.etc
term_metadata_df : pd.DataFrame, None by default
Dataframe version of term_metadata
include_all_contexts: bool, default False
Include all contexts, even non-matching ones, in interface
max_overlapping: int, default -1
Number of overlapping terms to display. If -1, display all (default).
show_corpus_stats: bool, default True
Show the corpus stats div
sort_doc_labels_by_name: bool default False
If unified, sort the document labels by name
always_jump: bool, default True
Always jump to term contexts if a term is clicked
enable_term_category_description: bool, default True
List term/metadata statistics under category
get_custom_term_html: str, default None
Javascript function which displays term summary from term info
header_names: Dict[str, str], default None
Dictionary giving names of term lists shown to the right of the plot. Valid keys are
upper, lower and right.
header_sorting_algos: Dict[str, str], default None
Dictionary giving javascript sorting algorithms for panes. Valid keys are upper, lower
and right. Value is a JS function which takes the "data" object.
ignore_categories: bool, default False
Signals the plot shouldn't display category names. Used in single category plots.
suppress_text_column: str, default None
Column in term_metadata_df which indicates term should be hidden
left_list_column: str, default None
Column in term_metadata_df which should be used for sorting words into upper and lower
parts of left word-list sections. Highest values in upper, lowest in lower.
tooltip_columns: List[str]
tooltip_column_names: Dict[str, str]
term_description_columns: List[str]
term_description_column_names: Dict[str, str]
term_word_in_term_description: str, default 'Term'
color_column: str, default None:
column in term_metadata_df which indicates color
color_score_column: str, default None
column in term_metadata df; contains value between 0 and 1 which will be used to assign a color
label_priority_column : str, default None
Column in term_metadata_df; larger values in the column indicate a term should be labeled first
censor_point_column : str, default None
Column in term_metadata_df indicating whether a label may be drawn over the term's point.
right_order_column : str, default None
Order for right column ("characteristic" by default); largest first
background_color : str, default None
Changes document.body's background color to background_color
line_coordinates : list, default None
Coordinates for drawing a line under the plot
subword_encoding : str, default None
Type of subword encoding to use, None if none, currently supports "RoBERTa"
top_terms_length : int, default 14
Number of words to list in most/least associated lists on left-hand side
top_terms_left_buffer : int, default 0
Number of pixels left to shift top terms list
dont_filter : bool, default False
Don't filter any terms when charting
use_offsets : bool, default False
Enable the use of metadata offsets
return_data : bool default False
Return a dict containing the output of `ScatterChartExplorer.to_dict` instead of
an html.
return_scatterplot_structure : bool, default False
return ScatterplotStructure instead of html
Returns
-------
str
html of visualization

path: scattertext/__init__.py
name: produce_scattertext_explorer
repository_name: JasonKessler/scattertext
repository_stars: 1,823
lang: python

body_without_docstring:

def produce_scattertext_explorer(corpus, category, category_name=None, not_category_name=None, protocol='https', pmi_threshold_coefficient=DEFAULT_MINIMUM_TERM_FREQUENCY, minimum_term_frequency=DEFAULT_PMI_THRESHOLD_COEFFICIENT, minimum_not_category_term_frequency=0, max_terms=None, filter_unigrams=False, height_in_pixels=None, width_in_pixels=None, max_snippets=None, max_docs_per_category=None, metadata=None, scores=None, x_coords=None, y_coords=None, original_x=None, original_y=None, rescale_x=None, rescale_y=None, singleScoreMode=False, sort_by_dist=False, reverse_sort_scores_for_not_category=True, use_full_doc=False, transform=percentile_alphabetical, jitter=0, gray_zero_scores=False, term_ranker=None, asian_mode=False, match_full_line=False, use_non_text_features=False, show_top_terms=True, show_characteristic=True, word_vec_use_p_vals=False, max_p_val=0.1, p_value_colors=False, term_significance=None, save_svg_button=False, x_label=None, y_label=None, d3_url=None, d3_scale_chromatic_url=None, pmi_filter_thresold=None, alternative_text_field=None, terms_to_include=None, semiotic_square=None, num_terms_semiotic_square=None, not_categories=None, neutral_categories=[], extra_categories=[], show_neutral=False, neutral_category_name=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, x_axis_values_format=None, y_axis_values_format=None, color_func=None, term_scorer=None, show_axes=True, show_axes_and_cross_hairs=False, show_diagonal=False, use_global_scale=False, horizontal_line_y_position=None, vertical_line_x_position=None, show_cross_axes=True, show_extra=False, extra_category_name=None, censor_points=True, center_label_over_points=False, x_axis_labels=None, y_axis_labels=None, topic_model_term_lists=None, topic_model_preview_size=10, metadata_descriptions=None, vertical_lines=None, characteristic_scorer=None, term_colors=None, unified_context=False, show_category_headings=True, highlight_selected_category=False, include_term_category_counts=False, div_name=None, alternative_term_func=None, term_metadata=None, term_metadata_df=None, max_overlapping=(- 1), include_all_contexts=False, show_corpus_stats=True, sort_doc_labels_by_name=False, enable_term_category_description=True, always_jump=True, get_custom_term_html=None, header_names=None, header_sorting_algos=None, ignore_categories=False, d3_color_scale=None, background_labels=None, tooltip_columns=None, tooltip_column_names=None, term_description_columns=None, term_description_column_names=None, term_word_in_term_description='Term', color_column=None, color_score_column=None, label_priority_column=None, text_color_column=None, suppress_text_column=None, background_color=None, left_list_column=None, censor_point_column=None, right_order_column=None, line_coordinates=None, subword_encoding=None, top_terms_length=14, top_terms_left_buffer=0, dont_filter=False, use_offsets=False, return_data=False, return_scatterplot_structure=False):
if (singleScoreMode or word_vec_use_p_vals):
d3_color_scale = 'd3.interpolatePurples'
if (singleScoreMode or (not sort_by_dist)):
sort_by_dist = False
else:
sort_by_dist = True
if (term_ranker is None):
term_ranker = termranking.AbsoluteFrequencyRanker
(category_name, not_category_name) = get_category_names(category, category_name, not_categories, not_category_name)
if (not_categories is None):
not_categories = [c for c in corpus.get_categories() if (c != category)]
if term_scorer:
scores = get_term_scorer_scores(category, corpus, neutral_categories, not_categories, show_neutral, term_ranker, term_scorer, use_non_text_features)
if (pmi_filter_thresold is not None):
pmi_threshold_coefficient = pmi_filter_thresold
warnings.warn("The argument name 'pmi_filter_thresold' has been deprecated. Use 'pmi_threshold_coefficient' in its place", DeprecationWarning)
if use_non_text_features:
pmi_threshold_coefficient = 0
scatter_chart_explorer = ScatterChartExplorer(corpus, minimum_term_frequency=minimum_term_frequency, minimum_not_category_term_frequency=minimum_not_category_term_frequency, pmi_threshold_coefficient=pmi_threshold_coefficient, filter_unigrams=filter_unigrams, jitter=jitter, max_terms=max_terms, term_ranker=term_ranker, use_non_text_features=use_non_text_features, term_significance=term_significance, terms_to_include=terms_to_include, dont_filter=dont_filter)
if (((x_coords is None) and (y_coords is not None)) or ((y_coords is None) and (x_coords is not None))):
raise Exception('Both x_coords and y_coords need to be passed or both left blank')
if (x_coords is not None):
scatter_chart_explorer.inject_coordinates(x_coords, y_coords, rescale_x=rescale_x, rescale_y=rescale_y, original_x=original_x, original_y=original_y)
if (topic_model_term_lists is not None):
scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if (metadata_descriptions is not None):
scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
if (term_colors is not None):
scatter_chart_explorer.inject_term_colors(term_colors)
if ((term_metadata_df is not None) and (term_metadata is not None)):
raise Exception('Both term_metadata_df and term_metadata cannot be values which are not None.')
if (term_metadata_df is not None):
scatter_chart_explorer.inject_term_metadata_df(term_metadata_df)
if (term_metadata is not None):
scatter_chart_explorer.inject_term_metadata(term_metadata)
html_base = None
if semiotic_square:
html_base = get_semiotic_square_html(num_terms_semiotic_square, semiotic_square)
scatter_chart_data = scatter_chart_explorer.to_dict(category=category, category_name=category_name, not_category_name=not_category_name, not_categories=not_categories, transform=transform, scores=scores, max_docs_per_category=max_docs_per_category, metadata=(metadata if (not callable(metadata)) else metadata(corpus)), alternative_text_field=alternative_text_field, neutral_category_name=neutral_category_name, extra_category_name=extra_category_name, neutral_categories=neutral_categories, extra_categories=extra_categories, background_scorer=characteristic_scorer, include_term_category_counts=include_term_category_counts, use_offsets=use_offsets)
if (line_coordinates is not None):
scatter_chart_data['line'] = line_coordinates
if return_data:
return scatter_chart_data
if (tooltip_columns is not None):
assert (get_tooltip_content is None)
get_tooltip_content = get_tooltip_js_function(term_metadata_df, tooltip_column_names, tooltip_columns)
if (term_description_columns is not None):
assert (get_custom_term_html is None)
get_custom_term_html = get_custom_term_info_js_function(term_metadata_df, term_description_column_names, term_description_columns, term_word_in_term_description)
if color_column:
assert (color_func is None)
color_func = ('(function(d) {return d.etc["%s"]})' % color_column)
if color_score_column:
assert (color_func is None)
color_func = ('(function(d) {return %s(d.etc["%s"])})' % ((d3_color_scale if (d3_color_scale is not None) else 'd3.interpolateRdYlBu'), color_score_column))
if (header_sorting_algos is not None):
assert ('upper' not in header_sorting_algos)
assert ('lower' not in header_sorting_algos)
if (left_list_column is not None):
assert (term_metadata_df is not None)
assert (left_list_column in term_metadata_df)
header_sorting_algos = {'upper': (((('((a,b) => b.etc["' + left_list_column) + '"] - a.etc["') + left_list_column) + '"])'), 'lower': (((('((a,b) => a.etc["' + left_list_column) + '"] - b.etc["') + left_list_column) + '"])')}
if (right_order_column is not None):
assert (right_order_column in term_metadata_df)
scatterplot_structure = ScatterplotStructure(VizDataAdapter(scatter_chart_data), width_in_pixels=width_in_pixels, height_in_pixels=height_in_pixels, max_snippets=max_snippets, color=d3_color_scale, grey_zero_scores=gray_zero_scores, sort_by_dist=sort_by_dist, reverse_sort_scores_for_not_category=reverse_sort_scores_for_not_category, use_full_doc=use_full_doc, asian_mode=asian_mode, match_full_line=match_full_line, use_non_text_features=use_non_text_features, show_characteristic=show_characteristic, word_vec_use_p_vals=word_vec_use_p_vals, max_p_val=max_p_val, save_svg_button=save_svg_button, p_value_colors=p_value_colors, x_label=x_label, y_label=y_label, show_top_terms=show_top_terms, show_neutral=show_neutral, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=show_axes, horizontal_line_y_position=horizontal_line_y_position, vertical_line_x_position=vertical_line_x_position, show_extra=show_extra, do_censor_points=censor_points, center_label_over_points=center_label_over_points, x_axis_labels=x_axis_labels, y_axis_labels=y_axis_labels, topic_model_preview_size=topic_model_preview_size, vertical_lines=vertical_lines, unified_context=unified_context, show_category_headings=show_category_headings, highlight_selected_category=highlight_selected_category, show_cross_axes=show_cross_axes, div_name=div_name, alternative_term_func=alternative_term_func, include_all_contexts=include_all_contexts, show_axes_and_cross_hairs=show_axes_and_cross_hairs, show_diagonal=show_diagonal, use_global_scale=use_global_scale, x_axis_values_format=x_axis_values_format, y_axis_values_format=y_axis_values_format, max_overlapping=max_overlapping, show_corpus_stats=show_corpus_stats, sort_doc_labels_by_name=sort_doc_labels_by_name, enable_term_category_description=enable_term_category_description, always_jump=always_jump, get_custom_term_html=get_custom_term_html, header_names=header_names, header_sorting_algos=header_sorting_algos, ignore_categories=ignore_categories, background_labels=background_labels, label_priority_column=label_priority_column, text_color_column=text_color_column, suppress_text_column=suppress_text_column, background_color=background_color, censor_point_column=censor_point_column, right_order_column=right_order_column, subword_encoding=subword_encoding, top_terms_length=top_terms_length, top_terms_left_buffer=top_terms_left_buffer)
if return_scatterplot_structure:
return scatterplot_structure
return BasicHTMLFromScatterplotStructure(scatterplot_structure).to_html(protocol=protocol, d3_url=d3_url, d3_scale_chromatic_url=d3_scale_chromatic_url, html_base=html_base) | def produce_scattertext_explorer(corpus, category, category_name=None, not_category_name=None, protocol='https', pmi_threshold_coefficient=DEFAULT_MINIMUM_TERM_FREQUENCY, minimum_term_frequency=DEFAULT_PMI_THRESHOLD_COEFFICIENT, minimum_not_category_term_frequency=0, max_terms=None, filter_unigrams=False, height_in_pixels=None, width_in_pixels=None, max_snippets=None, max_docs_per_category=None, metadata=None, scores=None, x_coords=None, y_coords=None, original_x=None, original_y=None, rescale_x=None, rescale_y=None, singleScoreMode=False, sort_by_dist=False, reverse_sort_scores_for_not_category=True, use_full_doc=False, transform=percentile_alphabetical, jitter=0, gray_zero_scores=False, term_ranker=None, asian_mode=False, match_full_line=False, use_non_text_features=False, show_top_terms=True, show_characteristic=True, word_vec_use_p_vals=False, max_p_val=0.1, p_value_colors=False, term_significance=None, save_svg_button=False, x_label=None, y_label=None, d3_url=None, d3_scale_chromatic_url=None, pmi_filter_thresold=None, alternative_text_field=None, terms_to_include=None, semiotic_square=None, num_terms_semiotic_square=None, not_categories=None, neutral_categories=[], extra_categories=[], show_neutral=False, neutral_category_name=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, x_axis_values_format=None, y_axis_values_format=None, color_func=None, term_scorer=None, show_axes=True, show_axes_and_cross_hairs=False, show_diagonal=False, use_global_scale=False, horizontal_line_y_position=None, vertical_line_x_position=None, show_cross_axes=True, show_extra=False, extra_category_name=None, censor_points=True, center_label_over_points=False, x_axis_labels=None, y_axis_labels=None, topic_model_term_lists=None, topic_model_preview_size=10, metadata_descriptions=None, vertical_lines=None, characteristic_scorer=None, term_colors=None, unified_context=False, show_category_headings=True, highlight_selected_category=False, include_term_category_counts=False, div_name=None, alternative_term_func=None, term_metadata=None, term_metadata_df=None, max_overlapping=(- 1), include_all_contexts=False, show_corpus_stats=True, sort_doc_labels_by_name=False, enable_term_category_description=True, always_jump=True, get_custom_term_html=None, header_names=None, header_sorting_algos=None, ignore_categories=False, d3_color_scale=None, background_labels=None, tooltip_columns=None, tooltip_column_names=None, term_description_columns=None, term_description_column_names=None, term_word_in_term_description='Term', color_column=None, color_score_column=None, label_priority_column=None, text_color_column=None, suppress_text_column=None, background_color=None, left_list_column=None, censor_point_column=None, right_order_column=None, line_coordinates=None, subword_encoding=None, top_terms_length=14, top_terms_left_buffer=0, dont_filter=False, use_offsets=False, return_data=False, return_scatterplot_structure=False):
'Returns html code of visualization.\n\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n Optional, defaults to category name.\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n Optional, defaults to "N(n)ot " + category_name, with the case of the \'n\' dependent\n on the case of the first letter in category_name.\n protocol : str, optional\n Protocol to use. Either http or https. Default is https.\n pmi_threshold_coefficient : int, optional\n Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6\n minimum_term_frequency : int, optional\n Minimum number of times word needs to appear to make it into visualization.\n minimum_not_category_term_frequency : int, optional\n If an n-gram does not occur in the category, minimum times it\n must be seen to be included. Default is 0.\n max_terms : int, optional\n Maximum number of terms to include in visualization.\n filter_unigrams : bool, optional\n Default False, do we filter out unigrams that only occur in one bigram\n width_in_pixels : int, optional\n Width of viz in pixels, if None, default to JS\'s choice\n height_in_pixels : int, optional\n Height of viz in pixels, if None, default to JS\'s choice\n max_snippets : int, optional\n Maximum number of snippets to show when term is clicked. If None, all are shown.\n max_docs_per_category: int, optional\n Maximum number of documents to store per category. If None, by default, all are stored.\n metadata : list or function, optional\n List of metadata strings that will be included for each document; if a function, it is called on the corpus\n scores : np.array, optional\n Array of term scores or None.\n x_coords : np.array, optional\n Array of term x-axis positions or None. Must be in [0,1].\n If present, y_coords must also be present.\n y_coords : np.array, optional\n Array of term y-axis positions or None. Must be in [0,1].\n If present, x_coords must also be present.\n original_x : array-like\n Original, unscaled x-values. Defaults to x_coords\n original_y : array-like\n Original, unscaled y-values. Defaults to y_coords\n rescale_x : lambda list[0,1]: list[0,1], optional\n Array of term x-axis positions or None. Must be in [0,1].\n Rescales x-axis after filtering\n rescale_y : lambda list[0,1]: list[0,1], optional\n Array of term y-axis positions or None. Must be in [0,1].\n Rescales y-axis after filtering\n singleScoreMode : bool, optional\n Label terms based on score vs distance from corner. Good for topic scores. Show only one color.\n sort_by_dist: bool, optional\n Label terms based on distance from corner. True by default. Negated by singleScoreMode.\n reverse_sort_scores_for_not_category: bool, optional\n If using a custom score, score the not-category class by\n lowest-score-as-most-predictive. Turn this off for word vector\n or topic similarity. Default True.\n use_full_doc : bool, optional\n Use the full document in snippets. False by default.\n transform : function, optional\n Not recommended for editing. Changes the way terms are ranked. Default is st.Scalers.percentile_alphabetical.\n jitter : float, optional\n percentage of axis to jitter each point. default is 0.\n gray_zero_scores : bool, optional\n If True, color points with zero-scores a light shade of grey. False by default.\n term_ranker : TermRanker, optional\n TermRanker class for determining term frequency ranks.\n asian_mode : bool, optional\n Use a special Javascript regular expression that\'s specific to Chinese or Japanese\n match_full_line : bool, optional\n Make the Javascript regex match the full line instead of part of it\n use_non_text_features : bool, optional\n Show non-bag-of-words features (e.g., Empath) instead of text. False by default.\n show_top_terms : bool, default True\n Show top terms on the left-hand side of the visualization\n show_characteristic: bool, default True\n Show characteristic terms on the far left-hand side of the visualization\n word_vec_use_p_vals: bool, default False\n Sort by harmonic mean of score and distance.\n max_p_val : float, default 0.1\n If word_vec_use_p_vals, the minimum p val to use.\n p_value_colors : bool, default False\n Color points differently if p val is above 1-max_p_val, below max_p_val, or\n in between.\n term_significance : TermSignificance instance or None\n Way of getting significance scores. If None, p values will not be added.\n save_svg_button : bool, default False\n Add a save as SVG button to the page.\n x_label : str, default None\n Custom x-axis label\n y_label : str, default None\n Custom y-axis label\n d3_url, str, None by default. The url (or path) of d3.\n URL of d3, to be inserted into <script src="..."/>. Overrides `protocol`.\n By default, this is `DEFAULT_D3_URL` declared in `ScatterplotStructure`.\n d3_scale_chromatic_url, str, None by default. Overrides `protocol`.\n URL of d3 scale chromatic, to be inserted into <script src="..."/>\n By default, this is `DEFAULT_D3_SCALE_CHROMATIC` declared in `ScatterplotStructure`.\n pmi_filter_thresold : (DEPRECATED) int, None by default\n DEPRECATED. Use pmi_threshold_coefficient instead.\n alternative_text_field : str or None, optional\n Field in the dataframe used to make the corpus, displayed in place of parsed text. Can\n only be used if corpus is a ParsedCorpus instance.\n terms_to_include : list or None, optional\n Whitelist of terms to include in visualization.\n semiotic_square : SemioticSquareBase\n None by default. SemioticSquare based on corpus. Includes square above visualization.\n num_terms_semiotic_square : int\n 10 by default. Number of terms to show in semiotic square.\n Only active if semiotic square is present.\n not_categories : list\n All categories other than category by default. Documents labeled\n with remaining category.\n neutral_categories : list\n [] by default. Documents labeled neutral.\n extra_categories : list\n [] by default. Documents labeled extra.\n show_neutral : bool\n False by default. Show a third column listing contexts in the\n neutral categories.\n neutral_category_name : str\n "Neutral" by default. Only active if show_neutral is True. Name of the neutral\n column.\n get_tooltip_content : str\n Javascript function to control content of tooltip. Function takes a parameter\n which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and\n returns a string.\n x_axis_values : list, default None\n Value-labels to show on x-axis. Low, medium, high are defaults.\n y_axis_values : list, default None\n Value-labels to show on y-axis. Low, medium, high are defaults.\n x_axis_values_format : str, default None\n d3 format of x-axis values\n y_axis_values_format : str, default None\n d3 format of y-axis values\n color_func : str, default None\n Javascript function to control color of a point. Function takes a parameter\n which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and\n returns a string.\n term_scorer : Object, default None\n In lieu of scores, object with a get_scores(a,b) function that returns a set of scores,\n where a and b are term counts. Scorer optionally has a get_term_freqs function. Also could be a\n CorpusBasedTermScorer instance.\n show_axes : bool, default True\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n show_axes_and_cross_hairs : bool, default False\n Show both peripheral axis labels and cross axes.\n show_diagonal : bool, default False\n Show a diagonal line leading from the lower-left to the upper-right; only makes\n sense to use this if use_global_scale is true.\n use_global_scale : bool, default False\n Use same scale for both axes\n vertical_line_x_position : float, default None\n horizontal_line_y_position : float, default None\n show_cross_axes : bool, default True\n If show_axes is False, do we show cross-axes?\n show_extra : bool\n False by default. Show a fourth column listing contexts in the\n extra categories.\n extra_category_name : str, default None\n "Extra" by default. Only active if show_neutral is True and show_extra is True. Name\n of the extra column.\n censor_points : bool, default True\n Don\'t label over points.\n center_label_over_points : bool, default False\n Center a label over points, or try to find a position near a point that\n doesn\'t overlap anything else.\n x_axis_labels: list, default None\n List of string value-labels to show at evenly spaced intervals on the x-axis.\n Low, medium, high are defaults.\n y_axis_labels : list, default None\n List of string value-labels to show at evenly spaced intervals on the y-axis.\n Low, medium, high are defaults.\n topic_model_term_lists : dict default None\n Dict of metadata name (str) -> List of string terms in metadata. These will be bolded\n in query in context results.\n topic_model_preview_size : int default 10\n Number of terms in topic model to show as a preview.\n metadata_descriptions : dict default None\n Dict of metadata name (str) -> str of metadata description. These will be shown when a metadata term is\n clicked.\n vertical_lines : list default None\n List of floats corresponding to points on the x-axis to draw vertical lines\n characteristic_scorer : CharacteristicScorer default None\n Used for background scores\n term_colors : dict, default None\n Dictionary mapping term to color\n unified_context : bool, default False\n If True, displays contexts in a single pane rather than in separate columns.\n show_category_headings : bool, default True\n Show category headings if unified_context is True.\n highlight_selected_category : bool, default False\n Highlight selected category if unified_context is True.\n include_term_category_counts : bool, default False\n Include the termCounts object in the plot definition.\n div_name : str, None by default\n Give the scatterplot div name a non-default value\n alternative_term_func: str, default None\n Javascript function which takes a term JSON object and returns a bool. If the return value is true,\n execute standard term click pipeline. Ex.: `\'(function(termDict) {return true;})\'`.\n term_metadata : dict, None by default\n Dict mapping terms to dictionaries containing additional information which can be used in the color_func\n or the get_tooltip_content function. These will appear in termDict.etc\n term_metadata_df : pd.DataFrame, None by default\n Dataframe version of term_metadata\n include_all_contexts: bool, default False\n Include all contexts, even non-matching ones, in interface\n max_overlapping: int, default -1\n Number of overlapping terms to display. If -1 (the default), display all.\n show_corpus_stats: bool, default True\n Show the corpus stats div\n sort_doc_labels_by_name: bool default False\n If unified, sort the document labels by name\n always_jump: bool, default True\n Always jump to term contexts if a term is clicked\n enable_term_category_description: bool, default True\n List term/metadata statistics under category\n get_custom_term_html: str, default None\n Javascript function which displays term summary from term info\n header_names: Dict[str, str], default None\n Dictionary giving names of term lists shown to the right of the plot. Valid keys are\n upper, lower and right.\n header_sorting_algos: Dict[str, str], default None\n Dictionary giving javascript sorting algorithms for panes. Valid keys are upper, lower\n and right. Value is a JS function which takes the "data" object.\n ignore_categories: bool, default False\n Signals the plot shouldn\'t display category names. Used in single category plots.\n suppress_text_column: str, default None\n Column in term_metadata_df which indicates term should be hidden\n left_list_column: str, default None\n Column in term_metadata_df which should be used for sorting words into upper and lower\n parts of left word-list sections. Highest values in upper, lowest in lower.\n tooltip_columns: List[str]\n tooltip_column_names: Dict[str, str]\n term_description_columns: List[str]\n term_description_column_names: Dict[str, str]\n term_word_in_term_description: str, default None\n color_column: str, default None:\n column in term_metadata_df which indicates color\n color_score_column: str, default None\n column in term_metadata_df; contains a value between 0 and 1 which will be used to assign a color\n label_priority_column : str, default None\n Column in term_metadata_df; larger values in the column indicate a term should be labeled first\n censor_point_column : str, default None\n Should we allow labels to be drawn over a point?\n right_order_column : str, default None\n Order for right column ("characteristic" by default); largest first\n background_color : str, default None\n Changes document.body\'s background color to background_color\n line_coordinates : list, default None\n Coordinates for drawing a line under the plot\n subword_encoding : str, default None\n Type of subword encoding to use, None if none, currently supports "RoBERTa"\n top_terms_length : int, default 14\n Number of words to list in most/least associated lists on left-hand side\n top_terms_left_buffer : int, default 0\n Number of pixels left to shift top terms list\n dont_filter : bool, default False\n Don\'t filter any terms when charting\n use_offsets : bool, default False\n Enable the use of metadata offsets\n return_data : bool default False\n Return a dict containing the output of `ScatterChartExplorer.to_dict` instead of\n an html.\n return_scatterplot_structure : bool, default False\n return ScatterplotStructure instead of html\n Returns\n -------\n str\n html of visualization\n\n '
if (singleScoreMode or word_vec_use_p_vals):
d3_color_scale = 'd3.interpolatePurples'
if (singleScoreMode or (not sort_by_dist)):
sort_by_dist = False
else:
sort_by_dist = True
if (term_ranker is None):
term_ranker = termranking.AbsoluteFrequencyRanker
(category_name, not_category_name) = get_category_names(category, category_name, not_categories, not_category_name)
if (not_categories is None):
not_categories = [c for c in corpus.get_categories() if (c != category)]
if term_scorer:
scores = get_term_scorer_scores(category, corpus, neutral_categories, not_categories, show_neutral, term_ranker, term_scorer, use_non_text_features)
if (pmi_filter_thresold is not None):
pmi_threshold_coefficient = pmi_filter_thresold
warnings.warn("The argument name 'pmi_filter_thresold' has been deprecated. Use 'pmi_threshold_coefficient' in its place", DeprecationWarning)
if use_non_text_features:
pmi_threshold_coefficient = 0
scatter_chart_explorer = ScatterChartExplorer(corpus, minimum_term_frequency=minimum_term_frequency, minimum_not_category_term_frequency=minimum_not_category_term_frequency, pmi_threshold_coefficient=pmi_threshold_coefficient, filter_unigrams=filter_unigrams, jitter=jitter, max_terms=max_terms, term_ranker=term_ranker, use_non_text_features=use_non_text_features, term_significance=term_significance, terms_to_include=terms_to_include, dont_filter=dont_filter)
if (((x_coords is None) and (y_coords is not None)) or ((y_coords is None) and (x_coords is not None))):
raise Exception('Both x_coords and y_coords need to be passed or both left blank')
if (x_coords is not None):
scatter_chart_explorer.inject_coordinates(x_coords, y_coords, rescale_x=rescale_x, rescale_y=rescale_y, original_x=original_x, original_y=original_y)
if (topic_model_term_lists is not None):
scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if (metadata_descriptions is not None):
scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
if (term_colors is not None):
scatter_chart_explorer.inject_term_colors(term_colors)
if ((term_metadata_df is not None) and (term_metadata is not None)):
raise Exception('Both term_metadata_df and term_metadata cannot be values which are not None.')
if (term_metadata_df is not None):
scatter_chart_explorer.inject_term_metadata_df(term_metadata_df)
if (term_metadata is not None):
scatter_chart_explorer.inject_term_metadata(term_metadata)
html_base = None
if semiotic_square:
html_base = get_semiotic_square_html(num_terms_semiotic_square, semiotic_square)
scatter_chart_data = scatter_chart_explorer.to_dict(category=category, category_name=category_name, not_category_name=not_category_name, not_categories=not_categories, transform=transform, scores=scores, max_docs_per_category=max_docs_per_category, metadata=(metadata if (not callable(metadata)) else metadata(corpus)), alternative_text_field=alternative_text_field, neutral_category_name=neutral_category_name, extra_category_name=extra_category_name, neutral_categories=neutral_categories, extra_categories=extra_categories, background_scorer=characteristic_scorer, include_term_category_counts=include_term_category_counts, use_offsets=use_offsets)
if (line_coordinates is not None):
scatter_chart_data['line'] = line_coordinates
if return_data:
return scatter_chart_data
if (tooltip_columns is not None):
assert (get_tooltip_content is None)
get_tooltip_content = get_tooltip_js_function(term_metadata_df, tooltip_column_names, tooltip_columns)
if (term_description_columns is not None):
assert (get_custom_term_html is None)
get_custom_term_html = get_custom_term_info_js_function(term_metadata_df, term_description_column_names, term_description_columns, term_word_in_term_description)
if color_column:
assert (color_func is None)
color_func = ('(function(d) {return d.etc["%s"]})' % color_column)
if color_score_column:
assert (color_func is None)
color_func = ('(function(d) {return %s(d.etc["%s"])})' % ((d3_color_scale if (d3_color_scale is not None) else 'd3.interpolateRdYlBu'), color_score_column))
if (header_sorting_algos is not None):
assert ('upper' not in header_sorting_algos)
assert ('lower' not in header_sorting_algos)
if (left_list_column is not None):
assert (term_metadata_df is not None)
assert (left_list_column in term_metadata_df)
header_sorting_algos = {'upper': (((('((a,b) => b.etc["' + left_list_column) + '"] - a.etc["') + left_list_column) + '"])'), 'lower': (((('((a,b) => a.etc["' + left_list_column) + '"] - b.etc["') + left_list_column) + '"])')}
if (right_order_column is not None):
assert (right_order_column in term_metadata_df)
scatterplot_structure = ScatterplotStructure(VizDataAdapter(scatter_chart_data), width_in_pixels=width_in_pixels, height_in_pixels=height_in_pixels, max_snippets=max_snippets, color=d3_color_scale, grey_zero_scores=gray_zero_scores, sort_by_dist=sort_by_dist, reverse_sort_scores_for_not_category=reverse_sort_scores_for_not_category, use_full_doc=use_full_doc, asian_mode=asian_mode, match_full_line=match_full_line, use_non_text_features=use_non_text_features, show_characteristic=show_characteristic, word_vec_use_p_vals=word_vec_use_p_vals, max_p_val=max_p_val, save_svg_button=save_svg_button, p_value_colors=p_value_colors, x_label=x_label, y_label=y_label, show_top_terms=show_top_terms, show_neutral=show_neutral, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=show_axes, horizontal_line_y_position=horizontal_line_y_position, vertical_line_x_position=vertical_line_x_position, show_extra=show_extra, do_censor_points=censor_points, center_label_over_points=center_label_over_points, x_axis_labels=x_axis_labels, y_axis_labels=y_axis_labels, topic_model_preview_size=topic_model_preview_size, vertical_lines=vertical_lines, unified_context=unified_context, show_category_headings=show_category_headings, highlight_selected_category=highlight_selected_category, show_cross_axes=show_cross_axes, div_name=div_name, alternative_term_func=alternative_term_func, include_all_contexts=include_all_contexts, show_axes_and_cross_hairs=show_axes_and_cross_hairs, show_diagonal=show_diagonal, use_global_scale=use_global_scale, x_axis_values_format=x_axis_values_format, y_axis_values_format=y_axis_values_format, max_overlapping=max_overlapping, show_corpus_stats=show_corpus_stats, sort_doc_labels_by_name=sort_doc_labels_by_name, enable_term_category_description=enable_term_category_description, always_jump=always_jump, get_custom_term_html=get_custom_term_html, header_names=header_names, header_sorting_algos=header_sorting_algos, ignore_categories=ignore_categories, background_labels=background_labels, label_priority_column=label_priority_column, text_color_column=text_color_column, suppress_text_column=suppress_text_column, background_color=background_color, censor_point_column=censor_point_column, right_order_column=right_order_column, subword_encoding=subword_encoding, top_terms_length=top_terms_length, top_terms_left_buffer=top_terms_left_buffer)
if return_scatterplot_structure:
return scatterplot_structure
return BasicHTMLFromScatterplotStructure(scatterplot_structure).to_html(protocol=protocol, d3_url=d3_url, d3_scale_chromatic_url=d3_scale_chromatic_url, html_base=html_base)<|docstring|>Returns html code of visualization.
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
Optional, defaults to category name.
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
Optional defaults to "N(n)ot " + category_name, with the case of the 'n' dependent
on the case of the first letter in category_name.
protocol : str, optional
Protocol to use. Either http or https. Default is https.
pmi_threshold_coefficient : int, optional
Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6
minimum_term_frequency : int, optional
Minimum number of times word needs to appear to make it into visualization.
minimum_not_category_term_frequency : int, optional
If an n-gram does not occur in the category, minimum times it
must be seen to be included. Default is 0.
max_terms : int, optional
Maximum number of terms to include in visualization.
filter_unigrams : bool, optional
Default False, do we filter out unigrams that only occur in one bigram
width_in_pixels : int, optional
Width of viz in pixels, if None, default to JS's choice
height_in_pixels : int, optional
Height of viz in pixels, if None, default to JS's choice
max_snippets : int, optional
Maximum number of snippets to show when term is clicked. If None, all are shown.
max_docs_per_category: int, optional
Maximum number of documents to store per category. If None, by default, all are stored.
metadata : list or function, optional
List of metadata strings that will be included for each document; if a function, it is called on the corpus
scores : np.array, optional
Array of term scores or None.
x_coords : np.array, optional
Array of term x-axis positions or None. Must be in [0,1].
If present, y_coords must also be present.
y_coords : np.array, optional
Array of term y-axis positions or None. Must be in [0,1].
If present, x_coords must also be present.
original_x : array-like
Original, unscaled x-values. Defaults to x_coords
original_y : array-like
Original, unscaled y-values. Defaults to y_coords
rescale_x : lambda list[0,1]: list[0,1], optional
Array of term x-axis positions or None. Must be in [0,1].
Rescales x-axis after filtering
rescale_y : lambda list[0,1]: list[0,1], optional
Array of term y-axis positions or None. Must be in [0,1].
Rescales y-axis after filtering
singleScoreMode : bool, optional
Label terms based on score vs distance from corner. Good for topic scores. Show only one color.
sort_by_dist: bool, optional
Label terms based on distance from corner. True by default. Negated by singleScoreMode.
reverse_sort_scores_for_not_category: bool, optional
If using a custom score, score the not-category class by
lowest-score-as-most-predictive. Turn this off for word vector
or topic similarity. Default True.
use_full_doc : bool, optional
Use the full document in snippets. False by default.
transform : function, optional
Not recommended for editing. Changes the way terms are ranked. Default is st.Scalers.percentile_alphabetical.
jitter : float, optional
percentage of axis to jitter each point. default is 0.
gray_zero_scores : bool, optional
If True, color points with zero-scores a light shade of grey. False by default.
term_ranker : TermRanker, optional
TermRanker class for determining term frequency ranks.
asian_mode : bool, optional
Use a special Javascript regular expression that's specific to Chinese or Japanese
match_full_line : bool, optional
Make the Javascript regex match the full line instead of part of it
use_non_text_features : bool, optional
Show non-bag-of-words features (e.g., Empath) instead of text. False by default.
show_top_terms : bool, default True
Show top terms on the left-hand side of the visualization
show_characteristic: bool, default True
Show characteristic terms on the far left-hand side of the visualization
word_vec_use_p_vals: bool, default False
Sort by harmonic mean of score and distance.
max_p_val : float, default 0.1
If word_vec_use_p_vals, the minimum p val to use.
p_value_colors : bool, default False
Color points differently if p val is above 1-max_p_val, below max_p_val, or
in between.
term_significance : TermSignificance instance or None
Way of getting significance scores. If None, p values will not be added.
save_svg_button : bool, default False
Add a save as SVG button to the page.
x_label : str, default None
Custom x-axis label
y_label : str, default None
Custom y-axis label
d3_url, str, None by default. The url (or path) of d3.
URL of d3, to be inserted into <script src="..."/>. Overrides `protocol`.
By default, this is `DEFAULT_D3_URL` declared in `ScatterplotStructure`.
d3_scale_chromatic_url, str, None by default. Overrides `protocol`.
URL of d3 scale chromatic, to be inserted into <script src="..."/>
By default, this is `DEFAULT_D3_SCALE_CHROMATIC` declared in `ScatterplotStructure`.
pmi_filter_thresold : (DEPRECATED) int, None by default
DEPRECATED. Use pmi_threshold_coefficient instead.
alternative_text_field : str or None, optional
Field in the dataframe used to make the corpus, displayed in place of parsed text. Can
only be used if corpus is a ParsedCorpus instance.
terms_to_include : list or None, optional
Whitelist of terms to include in visualization.
semiotic_square : SemioticSquareBase
None by default. SemioticSquare based on corpus. Includes square above visualization.
num_terms_semiotic_square : int
10 by default. Number of terms to show in semiotic square.
Only active if semiotic square is present.
not_categories : list
All categories other than category by default. Documents labeled
with remaining category.
neutral_categories : list
[] by default. Documents labeled neutral.
extra_categories : list
[] by default. Documents labeled extra.
show_neutral : bool
False by default. Show a third column listing contexts in the
neutral categories.
neutral_category_name : str
"Neutral" by default. Only active if show_neutral is True. Name of the neutral
column.
get_tooltip_content : str
Javascript function to control content of tooltip. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string.
x_axis_values : list, default None
Value-labels to show on x-axis. Low, medium, high are defaults.
y_axis_values : list, default None
Value-labels to show on y-axis. Low, medium, high are defaults.
x_axis_values_format : str, default None
d3 format of x-axis values
y_axis_values_format : str, default None
d3 format of y-axis values
color_func : str, default None
Javascript function to control color of a point. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string.
term_scorer : Object, default None
In lieu of scores, object with a get_scores(a,b) function that returns a set of scores,
where a and b are term counts. Scorer optionally has a get_term_freqs function. Also could be a
CorpusBasedTermScorer instance.
show_axes : bool, default True
Show the ticked axes on the plot. If false, show inner axes as a crosshair.
show_axes_and_cross_hairs : bool, default False
Show both peripheral axis labels and cross axes.
show_diagonal : bool, default False
Show a diagonal line leading from the lower-left to the upper-right; only makes
sense to use this if use_global_scale is true.
use_global_scale : bool, default False
Use same scale for both axes
vertical_line_x_position : float, default None
horizontal_line_y_position : float, default None
show_cross_axes : bool, default True
If show_axes is False, do we show cross-axes?
show_extra : bool
False by default. Show a fourth column listing contexts in the
extra categories.
extra_category_name : str, default None
"Extra" by default. Only active if show_neutral is True and show_extra is True. Name
of the extra column.
censor_points : bool, default True
Don't label over points.
center_label_over_points : bool, default False
Center a label over points, or try to find a position near a point that
doesn't overlap anything else.
x_axis_labels: list, default None
List of string value-labels to show at evenly spaced intervals on the x-axis.
Low, medium, high are defaults.
y_axis_labels : list, default None
List of string value-labels to show at evenly spaced intervals on the y-axis.
Low, medium, high are defaults.
topic_model_term_lists : dict default None
Dict of metadata name (str) -> List of string terms in metadata. These will be bolded
in query in context results.
topic_model_preview_size : int default 10
Number of terms in topic model to show as a preview.
metadata_descriptions : dict default None
Dict of metadata name (str) -> str of metadata description. These will be shown when a metadata term is
clicked.
vertical_lines : list default None
List of floats corresponding to points on the x-axis to draw vertical lines
characteristic_scorer : CharacteristicScorer default None
Used for background scores
term_colors : dict, default None
Dictionary mapping term to color
unified_context : bool, default False
If True, displays contexts in a single pane rather than in separate columns.
show_category_headings : bool, default True
Show category headings if unified_context is True.
highlight_selected_category : bool, default False
Highlight selected category if unified_context is True.
include_term_category_counts : bool, default False
Include the termCounts object in the plot definition.
div_name : str, None by default
Give the scatterplot div name a non-default value
alternative_term_func: str, default None
Javascript function which takes a term JSON object and returns a bool. If the return value is true,
execute standard term click pipeline. Ex.: `'(function(termDict) {return true;})'`.
term_metadata : dict, None by default
Dict mapping terms to dictionaries containing additional information which can be used in the color_func
or the get_tooltip_content function. These will appear in termDict.etc
term_metadata_df : pd.DataFrame, None by default
Dataframe version of term_metadata
include_all_contexts: bool, default False
Include all contexts, even non-matching ones, in interface
max_overlapping: int, default -1
Number of overlapping terms to display. If -1 (the default), display all.
show_corpus_stats: bool, default True
Show the corpus stats div
sort_doc_labels_by_name: bool default False
If unified, sort the document labels by name
always_jump: bool, default True
Always jump to term contexts if a term is clicked
enable_term_category_description: bool, default True
List term/metadata statistics under category
get_custom_term_html: str, default None
Javascript function which displays term summary from term info
header_names: Dict[str, str], default None
Dictionary giving names of term lists shown to the right of the plot. Valid keys are
upper, lower and right.
header_sorting_algos: Dict[str, str], default None
Dictionary giving javascript sorting algorithms for panes. Valid keys are upper, lower
and right. Value is a JS function which takes the "data" object.
ignore_categories: bool, default False
Signals the plot shouldn't display category names. Used in single category plots.
suppress_text_column: str, default None
Column in term_metadata_df which indicates term should be hidden
left_list_column: str, default None
Column in term_metadata_df which should be used for sorting words into upper and lower
parts of left word-list sections. Highest values in upper, lowest in lower.
tooltip_columns: List[str]
tooltip_column_names: Dict[str, str]
term_description_columns: List[str]
term_description_column_names: Dict[str, str]
term_word_in_term_description: str, default None
color_column: str, default None:
column in term_metadata_df which indicates color
color_score_column: str, default None
column in term_metadata_df; contains a value between 0 and 1 which will be used to assign a color
label_priority_column : str, default None
Column in term_metadata_df; larger values in the column indicate a term should be labeled first
censor_point_column : str, default None
Should we allow labels to be drawn over a point?
right_order_column : str, default None
Order for right column ("characteristic" by default); largest first
background_color : str, default None
Changes document.body's background color to background_color
line_coordinates : list, default None
Coordinates for drawing a line under the plot
subword_encoding : str, default None
Type of subword encoding to use, None if none, currently supports "RoBERTa"
top_terms_length : int, default 14
Number of words to list in most/least associated lists on left-hand side
top_terms_left_buffer : int, default 0
Number of pixels left to shift top terms list
dont_filter : bool, default False
Don't filter any terms when charting
use_offsets : bool, default False
Enable the use of metadata offsets
return_data : bool default False
Return a dict containing the output of `ScatterChartExplorer.to_dict` instead of
an html.
return_scatterplot_structure : bool, default False
return ScatterplotStructure instead of html
Returns
-------
str
html of visualization<|endoftext|> |
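A minimal usage sketch for produce_scattertext_explorer, following the pattern of the scattertext README; the 2012 convention sample corpus, the spaCy model name, and the output filename are illustrative assumptions, so substitute your own dataframe and pipeline.

import spacy
import scattertext as st

# Build a corpus from a labeled dataframe; the convention sample data ships with scattertext.
nlp = spacy.load('en_core_web_sm')
convention_df = st.SampleCorpora.ConventionData2012.get_data()
corpus = st.CorpusFromPandas(convention_df, category_col='party', text_col='text', nlp=nlp).build()

# category must match a value in the category column; metadata labels each document's snippets.
html = st.produce_scattertext_explorer(corpus,
                                       category='democrat',
                                       category_name='Democratic',
                                       not_category_name='Republican',
                                       minimum_term_frequency=5,
                                       width_in_pixels=1000,
                                       metadata=convention_df['speaker'])
with open('Convention-Visualization.html', 'w') as out:
    out.write(html)

Per the record above, passing return_data=True instead returns the ScatterChartExplorer.to_dict payload rather than html, and return_scatterplot_structure=True returns the ScatterplotStructure object.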
a7c967290024c048739dd95473368eecdb2f670f17d77c502f3c560f877016f4 | def produce_scattertext_html(term_doc_matrix, category, category_name, not_category_name, protocol='https', minimum_term_frequency=DEFAULT_MINIMUM_TERM_FREQUENCY, pmi_threshold_coefficient=DEFAULT_PMI_THRESHOLD_COEFFICIENT, max_terms=None, filter_unigrams=False, height_in_pixels=None, width_in_pixels=None, term_ranker=termranking.AbsoluteFrequencyRanker):
"Returns html code of visualization.\n\n Parameters\n ----------\n term_doc_matrix : TermDocMatrix\n Corpus to use\n category : str\n name of category column\n category_name: str\n name of category to mine for\n not_category_name: str\n name of everything that isn't in category\n protocol : str\n optional, used prototcol of , http or https\n minimum_term_frequency : int, optional\n Minimum number of times word needs to appear to make it into visualization.\n pmi_threshold_coefficient : int, optional\n Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6.\n max_terms : int, optional\n Maximum number of terms to include in visualization.\n filter_unigrams : bool\n default False, do we filter unigrams that only occur in one bigram\n width_in_pixels: int\n width of viz in pixels, if None, default to JS's choice\n height_in_pixels: int\n height of viz in pixels, if None, default to JS's choice\n term_ranker : TermRanker\n TermRanker class for determining term frequency ranks.\n\n Returns\n -------\n str, html of visualization\n "
scatter_chart_data = ScatterChart(term_doc_matrix=term_doc_matrix, minimum_term_frequency=minimum_term_frequency, pmi_threshold_coefficient=pmi_threshold_coefficient, filter_unigrams=filter_unigrams, max_terms=max_terms, term_ranker=term_ranker).to_dict(category=category, category_name=category_name, not_category_name=not_category_name, transform=percentile_alphabetical)
scatterplot_structure = ScatterplotStructure(VizDataAdapter(scatter_chart_data), width_in_pixels, height_in_pixels)
return BasicHTMLFromScatterplotStructure(scatterplot_structure).to_html(protocol=protocol) | Returns html code of visualization.
Parameters
----------
term_doc_matrix : TermDocMatrix
Corpus to use
category : str
name of category column
category_name: str
name of category to mine for
not_category_name: str
name of everything that isn't in category
protocol : str
optional, protocol to use: http or https
minimum_term_frequency : int, optional
Minimum number of times word needs to appear to make it into visualization.
pmi_threshold_coefficient : int, optional
Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6.
max_terms : int, optional
Maximum number of terms to include in visualization.
filter_unigrams : bool
default False, do we filter unigrams that only occur in one bigram
width_in_pixels: int
width of viz in pixels, if None, default to JS's choice
height_in_pixels: int
height of viz in pixels, if None, default to JS's choice
term_ranker : TermRanker
TermRanker class for determining term frequency ranks.
Returns
-------
str, html of visualization | scattertext/__init__.py | produce_scattertext_html | JasonKessler/scattertext | 1,823 | python | def produce_scattertext_html(term_doc_matrix, category, category_name, not_category_name, protocol='https', minimum_term_frequency=DEFAULT_MINIMUM_TERM_FREQUENCY, pmi_threshold_coefficient=DEFAULT_PMI_THRESHOLD_COEFFICIENT, max_terms=None, filter_unigrams=False, height_in_pixels=None, width_in_pixels=None, term_ranker=termranking.AbsoluteFrequencyRanker):
"Returns html code of visualization.\n\n Parameters\n ----------\n term_doc_matrix : TermDocMatrix\n Corpus to use\n category : str\n name of category column\n category_name: str\n name of category to mine for\n not_category_name: str\n name of everything that isn't in category\n protocol : str\n optional, used prototcol of , http or https\n minimum_term_frequency : int, optional\n Minimum number of times word needs to appear to make it into visualization.\n pmi_threshold_coefficient : int, optional\n Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6.\n max_terms : int, optional\n Maximum number of terms to include in visualization.\n filter_unigrams : bool\n default False, do we filter unigrams that only occur in one bigram\n width_in_pixels: int\n width of viz in pixels, if None, default to JS's choice\n height_in_pixels: int\n height of viz in pixels, if None, default to JS's choice\n term_ranker : TermRanker\n TermRanker class for determining term frequency ranks.\n\n Returns\n -------\n str, html of visualization\n "
scatter_chart_data = ScatterChart(term_doc_matrix=term_doc_matrix, minimum_term_frequency=minimum_term_frequency, pmi_threshold_coefficient=pmi_threshold_coefficient, filter_unigrams=filter_unigrams, max_terms=max_terms, term_ranker=term_ranker).to_dict(category=category, category_name=category_name, not_category_name=not_category_name, transform=percentile_alphabetical)
scatterplot_structure = ScatterplotStructure(VizDataAdapter(scatter_chart_data), width_in_pixels, height_in_pixels)
return BasicHTMLFromScatterplotStructure(scatterplot_structure).to_html(protocol=protocol) | def produce_scattertext_html(term_doc_matrix, category, category_name, not_category_name, protocol='https', minimum_term_frequency=DEFAULT_MINIMUM_TERM_FREQUENCY, pmi_threshold_coefficient=DEFAULT_PMI_THRESHOLD_COEFFICIENT, max_terms=None, filter_unigrams=False, height_in_pixels=None, width_in_pixels=None, term_ranker=termranking.AbsoluteFrequencyRanker):
"Returns html code of visualization.\n\n Parameters\n ----------\n term_doc_matrix : TermDocMatrix\n Corpus to use\n category : str\n name of category column\n category_name: str\n name of category to mine for\n not_category_name: str\n name of everything that isn't in category\n protocol : str\n optional, used prototcol of , http or https\n minimum_term_frequency : int, optional\n Minimum number of times word needs to appear to make it into visualization.\n pmi_threshold_coefficient : int, optional\n Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6.\n max_terms : int, optional\n Maximum number of terms to include in visualization.\n filter_unigrams : bool\n default False, do we filter unigrams that only occur in one bigram\n width_in_pixels: int\n width of viz in pixels, if None, default to JS's choice\n height_in_pixels: int\n height of viz in pixels, if None, default to JS's choice\n term_ranker : TermRanker\n TermRanker class for determining term frequency ranks.\n\n Returns\n -------\n str, html of visualization\n "
scatter_chart_data = ScatterChart(term_doc_matrix=term_doc_matrix, minimum_term_frequency=minimum_term_frequency, pmi_threshold_coefficient=pmi_threshold_coefficient, filter_unigrams=filter_unigrams, max_terms=max_terms, term_ranker=term_ranker).to_dict(category=category, category_name=category_name, not_category_name=not_category_name, transform=percentile_alphabetical)
scatterplot_structure = ScatterplotStructure(VizDataAdapter(scatter_chart_data), width_in_pixels, height_in_pixels)
return BasicHTMLFromScatterplotStructure(scatterplot_structure).to_html(protocol=protocol)<|docstring|>Returns html code of visualization.
Parameters
----------
term_doc_matrix : TermDocMatrix
Corpus to use
category : str
name of category column
category_name: str
name of category to mine for
not_category_name: str
name of everything that isn't in category
protocol : str
optional, protocol to use: http or https
minimum_term_frequency : int, optional
Minimum number of times word needs to appear to make it into visualization.
pmi_threshold_coefficient : int, optional
Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 6.
max_terms : int, optional
Maximum number of terms to include in visualization.
filter_unigrams : bool
default False, do we filter unigrams that only occur in one bigram
width_in_pixels: int
width of viz in pixels, if None, default to JS's choice
height_in_pixels: int
height of viz in pixels, if None, default to JS's choice
term_ranker : TermRanker
TermRanker class for determining term frequency ranks.
Returns
-------
str, html of visualization<|endoftext|> |
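For comparison, a sketch of the simpler produce_scattertext_html call documented in the record above; it draws the same kind of scatterplot without the explorer's search and snippet panes. The corpus construction repeats the assumptions from the previous sketch.

import spacy
import scattertext as st

nlp = spacy.load('en_core_web_sm')  # assumed model name
convention_df = st.SampleCorpora.ConventionData2012.get_data()
corpus = st.CorpusFromPandas(convention_df, category_col='party', text_col='text', nlp=nlp).build()

# Only the basic knobs listed in the docstring above are available here.
html = st.produce_scattertext_html(corpus,
                                   category='democrat',
                                   category_name='Democratic',
                                   not_category_name='Republican',
                                   minimum_term_frequency=5,
                                   width_in_pixels=1000)
with open('Convention-Basic.html', 'w') as out:
    out.write(html)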
aacf3801ac081b1cc0251bbc08fe587a5c3f08db0e470c34ee5d13d4ff41bfbe | def get_semiotic_square_html(num_terms_semiotic_square, semiotic_square):
'\n\n :param num_terms_semiotic_square: int\n :param semiotic_square: SemioticSquare\n :return: str\n '
semiotic_square_html = None
if semiotic_square:
semiotic_square_viz = HTMLSemioticSquareViz(semiotic_square)
if num_terms_semiotic_square:
semiotic_square_html = semiotic_square_viz.get_html(num_terms_semiotic_square)
else:
semiotic_square_html = semiotic_square_viz.get_html()
return semiotic_square_html | :param num_terms_semiotic_square: int
:param semiotic_square: SemioticSquare
:return: str | scattertext/__init__.py | get_semiotic_square_html | JasonKessler/scattertext | 1,823 | python | def get_semiotic_square_html(num_terms_semiotic_square, semiotic_square):
'\n\n :param num_terms_semiotic_square: int\n :param semiotic_square: SemioticSquare\n :return: str\n '
semiotic_square_html = None
if semiotic_square:
semiotic_square_viz = HTMLSemioticSquareViz(semiotic_square)
if num_terms_semiotic_square:
semiotic_square_html = semiotic_square_viz.get_html(num_terms_semiotic_square)
else:
semiotic_square_html = semiotic_square_viz.get_html()
return semiotic_square_html | def get_semiotic_square_html(num_terms_semiotic_square, semiotic_square):
'\n\n :param num_terms_semiotic_square: int\n :param semiotic_square: SemioticSquare\n :return: str\n '
semiotic_square_html = None
if semiotic_square:
semiotic_square_viz = HTMLSemioticSquareViz(semiotic_square)
if num_terms_semiotic_square:
semiotic_square_html = semiotic_square_viz.get_html(num_terms_semiotic_square)
else:
semiotic_square_html = semiotic_square_viz.get_html()
return semiotic_square_html<|docstring|>:param num_terms_semiotic_square: int
:param semiotic_square: SemioticSquare
:return: str<|endoftext|> |
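get_semiotic_square_html is an internal helper: produce_scattertext_explorer calls it to render the square above the plot whenever a semiotic_square argument is passed. A sketch of that calling path, assuming a corpus with a third category ('independent' is invented here for illustration):

import scattertext as st

# Assumes `corpus` was built as in the earlier sketches, but from data
# containing a third, neutral category.
semiotic_square = st.SemioticSquare(corpus,
                                    category_a='democrat',
                                    category_b='republican',
                                    neutral_categories=['independent'],  # hypothetical category
                                    scorer=st.RankDifference())

# The explorer invokes get_semiotic_square_html(num_terms_semiotic_square, semiotic_square)
# internally and places the rendered square in html_base.
html = st.produce_scattertext_explorer(corpus,
                                       category='democrat',
                                       category_name='Democratic',
                                       not_category_name='Republican',
                                       semiotic_square=semiotic_square,
                                       num_terms_semiotic_square=5)

num_terms_semiotic_square controls how many terms appear in each cell of the square, defaulting to 10 per the explorer's docstring.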
291fa0eec5af1e76d416d826d90468f885bb7e7cf93e14dd80e1b817828fc2a4 | def word_similarity_explorer_gensim(corpus, category, target_term, category_name=None, not_category_name=None, word2vec=None, alpha=0.01, max_p_val=0.1, term_significance=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n target_term : str\n Word or phrase for semantic similarity comparison\n word2vec : word2vec.Word2Vec\n Gensim-compatible Word2Vec model of lower-cased corpus. If None, one will be trained using Word2VecFromParsedCorpus(corpus).train()\n alpha : float, default = 0.01\n Uniform Dirichlet prior for p-value calculation\n max_p_val : float, default = 0.1\n Max p-val to use to find the set of terms for similarity calculation\n term_significance : TermSignificance\n Significance finder\n\n Remaining arguments are from `produce_scattertext_explorer`.\n Returns\n -------\n str, html of visualization\n '
if (word2vec is None):
word2vec = Word2VecFromParsedCorpus(corpus).train()
if (term_significance is None):
term_significance = LogOddsRatioUninformativeDirichletPrior(alpha)
assert issubclass(type(term_significance), TermSignificance)
scores = []
for tok in corpus._term_idx_store._i2val:
try:
scores.append(word2vec.similarity(target_term, tok.replace(' ', '_')))
except:
try:
scores.append(np.mean([word2vec.similarity(target_term, tok_part) for tok_part in tok.split()]))
except:
scores.append(0)
scores = np.array(scores)
return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=term_significance, max_p_val=max_p_val, p_value_colors=True, **kwargs) | Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
Word or phrase for semantic similarity comparison
word2vec : word2vec.Word2Vec
Gensim-compatible Word2Vec model of lower-cased corpus. If None, one will be trained using Word2VecFromParsedCorpus(corpus).train()
alpha : float, default = 0.01
Uniform dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
Max p-val to use find set of terms for similarity calculation
term_significance : TermSignificance
Significance finder
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization | scattertext/__init__.py | word_similarity_explorer_gensim | JasonKessler/scattertext | 1,823 | python | def word_similarity_explorer_gensim(corpus, category, target_term, category_name=None, not_category_name=None, word2vec=None, alpha=0.01, max_p_val=0.1, term_significance=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n target_term : str\n Word or phrase for semantic similarity comparison\n word2vec : word2vec.Word2Vec\n Gensim-compatible Word2Vec model of lower-cased corpus. If none, o\n ne will be trained using Word2VecFromParsedCorpus(corpus).train()\n alpha : float, default = 0.01\n Uniform dirichlet prior for p-value calculation\n max_p_val : float, default = 0.1\n Max p-val to use find set of terms for similarity calculation\n term_significance : TermSignificance\n Significance finder\n\n Remaining arguments are from `produce_scattertext_explorer`.\n Returns\n -------\n str, html of visualization\n '
if (word2vec is None):
word2vec = Word2VecFromParsedCorpus(corpus).train()
if (term_significance is None):
term_significance = LogOddsRatioUninformativeDirichletPrior(alpha)
assert issubclass(type(term_significance), TermSignificance)
scores = []
for tok in corpus._term_idx_store._i2val:
try:
scores.append(word2vec.similarity(target_term, tok.replace(' ', '_')))
except:
try:
scores.append(np.mean([word2vec.similarity(target_term, tok_part) for tok_part in tok.split()]))
except:
scores.append(0)
scores = np.array(scores)
return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=term_significance, max_p_val=max_p_val, p_value_colors=True, **kwargs) | def word_similarity_explorer_gensim(corpus, category, target_term, category_name=None, not_category_name=None, word2vec=None, alpha=0.01, max_p_val=0.1, term_significance=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n target_term : str\n Word or phrase for semantic similarity comparison\n word2vec : word2vec.Word2Vec\n Gensim-compatible Word2Vec model of lower-cased corpus. If none, o\n ne will be trained using Word2VecFromParsedCorpus(corpus).train()\n alpha : float, default = 0.01\n Uniform dirichlet prior for p-value calculation\n max_p_val : float, default = 0.1\n Max p-val to use find set of terms for similarity calculation\n term_significance : TermSignificance\n Significance finder\n\n Remaining arguments are from `produce_scattertext_explorer`.\n Returns\n -------\n str, html of visualization\n '
if (word2vec is None):
word2vec = Word2VecFromParsedCorpus(corpus).train()
if (term_significance is None):
term_significance = LogOddsRatioUninformativeDirichletPrior(alpha)
assert issubclass(type(term_significance), TermSignificance)
scores = []
for tok in corpus._term_idx_store._i2val:
try:
scores.append(word2vec.similarity(target_term, tok.replace(' ', '_')))
except:
try:
scores.append(np.mean([word2vec.similarity(target_term, tok_part) for tok_part in tok.split()]))
except:
scores.append(0)
scores = np.array(scores)
return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=term_significance, max_p_val=max_p_val, p_value_colors=True, **kwargs)<|docstring|>Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
Word or phrase for semantic similarity comparison
word2vec : word2vec.Word2Vec
Gensim-compatible Word2Vec model of lower-cased corpus. If none, o
ne will be trained using Word2VecFromParsedCorpus(corpus).train()
alpha : float, default = 0.01
Uniform dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
Max p-val to use find set of terms for similarity calculation
term_significance : TermSignificance
Significance finder
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization<|endoftext|> |
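
A hedged end-to-end sketch of calling this function; it assumes spaCy and gensim are installed and uses the 2012 convention sample corpus that ships with scattertext. The output file name is illustrative. With word2vec=None, the function trains a Word2Vec model on the corpus itself.

import spacy
import scattertext as st

nlp = spacy.load('en_core_web_sm')
convention_df = st.SampleCorpora.ConventionData2012.get_data()
convention_df['parse'] = convention_df.text.apply(nlp)
corpus = st.CorpusFromParsedDocuments(convention_df, category_col='party', parsed_col='parse').build()

html = st.word_similarity_explorer_gensim(corpus,
                                          category='democrat',
                                          category_name='Democratic',
                                          not_category_name='Republican',
                                          target_term='jobs')
open('gensim_similarity.html', 'wb').write(html.encode('utf-8'))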

9c328fd966582a8a4e79104b9610a923b16c6fbaac40d81d0740614a77311b1b | scattertext/__init__.py | word_similarity_explorer | JasonKessler/scattertext | 1,823 | python

def word_similarity_explorer(corpus, category, category_name, not_category_name, target_term, nlp=None, alpha=0.01, max_p_val=0.1, **kwargs):
    '''
    Parameters
    ----------
    corpus : Corpus
        Corpus to use.
    category : str
        Name of category column as it appears in original data frame.
    category_name : str
        Name of category to use.  E.g., "5-star reviews."
    not_category_name : str
        Name of everything that isn't in category.  E.g., "Below 5-star reviews".
    target_term : str
        Word or phrase for semantic similarity comparison
    nlp : spaCy-like parsing function
        E.g., spacy.load('en_core_web_sm'), whitespace_nlp, etc...
    alpha : float, default = 0.01
        Uniform Dirichlet prior for p-value calculation
    max_p_val : float, default = 0.1
        Max p-val used to find the set of terms for the similarity calculation
    Remaining arguments are from `produce_scattertext_explorer`.

    Returns
    -------
    str, html of visualization
    '''
    if nlp is None:
        import spacy
        nlp = spacy.load('en_core_web_sm')
    base_term = nlp(target_term)
    scores = np.array([base_term.similarity(nlp(tok)) for tok in corpus._term_idx_store._i2val])
    return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=LogOddsRatioUninformativeDirichletPrior(alpha), max_p_val=max_p_val, p_value_colors=True, **kwargs)
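
The spaCy-vector variant can be sketched the same way, reusing the `corpus` and `nlp` objects from the previous example; a spaCy model that ships with word vectors (e.g., en_core_web_md) should give more meaningful similarities than the small model.

# reuses `corpus` and `nlp` from the sketch above
html = st.word_similarity_explorer(corpus,
                                   category='democrat',
                                   category_name='Democratic',
                                   not_category_name='Republican',
                                   target_term='jobs',
                                   nlp=nlp)
open('spacy_similarity.html', 'wb').write(html.encode('utf-8'))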

ced7867293f0cae8ba22766ee982a465a2c07b5431b64f29ab1fb6ecad88eae2 | scattertext/__init__.py | produce_frequency_explorer | JasonKessler/scattertext | 1,823 | python

def produce_frequency_explorer(corpus, category, category_name=None, not_category_name=None, term_ranker=termranking.AbsoluteFrequencyRanker, alpha=0.01, use_term_significance=False, term_scorer=None, not_categories=None, grey_threshold=0, y_axis_values=None, frequency_transform=lambda x: scale(np.log(x) - np.log(1)), **kwargs):
    '''
    Produces a Monroe et al. style visualization, with the x-axis being the log frequency

    Parameters
    ----------
    corpus : Corpus
        Corpus to use.
    category : str
        Name of category column as it appears in original data frame.
    category_name : str or None
        Name of category to use.  E.g., "5-star reviews."
        Defaults to category
    not_category_name : str or None
        Name of everything that isn't in category.  E.g., "Below 5-star reviews".
        Defaults to "Not " + category_name
    term_ranker : TermRanker
        TermRanker class for determining term frequency ranks.
    alpha : float, default = 0.01
        Uniform Dirichlet prior for p-value calculation
    use_term_significance : bool, False by default
        Use the term scorer for significance
    term_scorer : TermSignificance
        Subclass of TermSignificance to use for scores and significance
    not_categories : list
        All categories other than category by default. Documents labeled
        with remaining category.
    grey_threshold : float
        Score to grey points. Default is 0
    y_axis_values : list
        Custom y-axis values. Defaults to linspace
    frequency_transform : lambda, default lambda x: scale(np.log(x) - np.log(1))
        Takes a vector of frequencies and returns their x-axis scale.
    Remaining arguments are from `produce_scattertext_explorer`.

    Returns
    -------
    str, html of visualization
    '''
    if not_categories is None:
        not_categories = [c for c in corpus.get_categories() if c != category]
    if term_scorer is None:
        term_scorer = LogOddsRatioUninformativeDirichletPrior(alpha)
    my_term_ranker = term_ranker(corpus)
    if kwargs.get('use_non_text_features', False):
        my_term_ranker.use_non_text_features()
    term_freq_df = my_term_ranker.get_ranks() + 1
    freqs = term_freq_df[[c + ' freq' for c in [category] + not_categories]].sum(axis=1).values
    x_axis_values = [round_downer(10 ** x) for x in np.linspace(0, np.log(freqs.max()) / np.log(10), 5)]
    x_axis_values = [x for x in x_axis_values if 1 < x <= freqs.max()]
    frequencies_log_scaled = frequency_transform(freqs)
    if 'scores' not in kwargs:
        kwargs['scores'] = get_term_scorer_scores(category, corpus, kwargs.get('neutral_categories', False), not_categories, kwargs.get('show_neutral', False), term_ranker, term_scorer, kwargs.get('use_non_text_features', False))

    def y_axis_rescale(coords):
        return ((coords - 0.5) / np.abs(coords - 0.5).max() + 1) / 2

    # rounds to one significant digit; currently unused in this function
    def round_to_1(x):
        if x == 0:
            return 0
        return round(x, -int(np.floor(np.log10(abs(x)))))

    if y_axis_values is None:
        max_score = np.floor(np.max(kwargs['scores']) * 100) / 100
        min_score = np.ceil(np.min(kwargs['scores']) * 100) / 100
        central = 0 if min_score < 0 and max_score > 0 else 0.5
        y_axis_values = [x for x in [min_score, central, max_score] if min_score <= x <= max_score]
    scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(kwargs['scores'])
    if use_term_significance:
        kwargs['term_significance'] = term_scorer
    kwargs['y_label'] = kwargs.get('y_label', term_scorer.get_name())
    kwargs['color_func'] = kwargs.get('color_func', '(function(d) {\n\treturn (Math.abs(d.os) < %s) \n\t ? d3.interpolate(d3.rgb(230, 230, 230), d3.rgb(130, 130, 130))(Math.abs(d.os)/%s) \n\t : d3.interpolateRdYlBu(d.y);\n\t})' % (grey_threshold, grey_threshold))
    return produce_scattertext_explorer(corpus, category=category, category_name=category_name, not_category_name=not_category_name, x_coords=frequencies_log_scaled, y_coords=scores_scaled_for_charting, original_x=freqs, original_y=kwargs['scores'], x_axis_values=x_axis_values, y_axis_values=y_axis_values, rescale_x=scale, rescale_y=y_axis_rescale, sort_by_dist=False, term_ranker=term_ranker, not_categories=not_categories, x_label=kwargs.get('x_label', 'Log Frequency'), **kwargs)
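
A minimal sketch of the frequency explorer, again reusing the hypothetical convention corpus from the earlier example; when term_scorer is omitted, the log-odds-ratio scorer with an uninformative Dirichlet prior is used, as the code above shows.

# reuses `corpus` from the earlier sketch
html = st.produce_frequency_explorer(corpus,
                                     category='democrat',
                                     category_name='Democratic',
                                     not_category_name='Republican')
open('frequency_explorer.html', 'wb').write(html.encode('utf-8'))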

573bfc02faca91fe4799d1319bf7247bd9826ac829f031cee9d2e92965a82918 | scattertext/__init__.py | produce_semiotic_square_explorer | JasonKessler/scattertext | 1,823 | python

def produce_semiotic_square_explorer(semiotic_square, x_label, y_label, category_name=None, not_category_name=None, neutral_category_name=None, num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, color_func=None, axis_scaler=scale_neg_1_to_1_with_zero_mean, **kwargs):
    '''
    Produces a semiotic square visualization.

    Parameters
    ----------
    semiotic_square : SemioticSquare
        The basis of the visualization
    x_label : str
        The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.
    y_label : str
        The y-axis label in the scatter plot. Relationship between the neutral and complex terms.
    category_name : str or None
        Name of category to use. Defaults to category_a.
    not_category_name : str or None
        Name of everything that isn't in category. Defaults to category_b.
    neutral_category_name : str or None
        Name of neutral set of data. Defaults to "Neutral".
    num_terms_semiotic_square : int or None
        10 by default. Number of terms to show in semiotic square.
    get_tooltip_content : str or None
        Defaults to tooltip showing z-scores on both axes.
    x_axis_values : list, default None
        Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
    y_axis_values : list, default None
        Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
    color_func : str, default None
        Javascript function to control color of a point. Function takes a parameter
        which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
        returns a string. Defaults to RdYlBu on the x-axis, and varying saturation on the y-axis.
    axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean
        Scale values to fit axis
    Remaining arguments are from `produce_scattertext_explorer`.

    Returns
    -------
    str, html of visualization
    '''
    if category_name is None:
        category_name = semiotic_square.category_a_
    if not_category_name is None:
        not_category_name = semiotic_square.category_b_
    if get_tooltip_content is None:
        get_tooltip_content = ('(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})' % (x_label, y_label))
    if color_func is None:
        color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
    # dead code from the original source, kept as a comment:
    # my_scaler = scale_neg_1_to_1_with_zero_mean_abs_max
    # if foveate:
    #     my_scaler = scale_neg_1_to_1_with_zero_mean_rank_abs_max
    axes = semiotic_square.get_axes()
    return produce_scattertext_explorer(semiotic_square.term_doc_matrix_, category=semiotic_square.category_a_, category_name=category_name, not_category_name=not_category_name, not_categories=[semiotic_square.category_b_], scores=-axes['x'], sort_by_dist=False, x_coords=axis_scaler(-axes['x']), y_coords=axis_scaler(axes['y']), original_x=-axes['x'], original_y=axes['y'], show_characteristic=False, show_top_terms=False, x_label=x_label, y_label=y_label, semiotic_square=semiotic_square, neutral_categories=semiotic_square.neutral_categories_, show_neutral=True, neutral_category_name=neutral_category_name, num_terms_semiotic_square=num_terms_semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=False, **kwargs)
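
A sketch of driving this function from a SemioticSquare; the neutral 'news' category is an illustrative assumption about the corpus, not part of the convention sample data.

# assumes `corpus` has categories 'democrat', 'republican', and a neutral 'news'
semiotic_square = st.SemioticSquare(corpus,
                                    category_a='democrat',
                                    category_b='republican',
                                    neutral_categories=['news'])
html = st.produce_semiotic_square_explorer(semiotic_square,
                                           x_label='Democrat-Republican',
                                           y_label='Speech-News')
open('semiotic_square.html', 'wb').write(html.encode('utf-8'))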

37156dd7318bd06ce18a3e4025f0690de906b8b8e8be661eb3a332d6ce452dbd | scattertext/__init__.py | produce_four_square_explorer | JasonKessler/scattertext | 1,823 | python

def produce_four_square_explorer(four_square, x_label=None, y_label=None, a_category_name=None, b_category_name=None, not_a_category_name=None, not_b_category_name=None, num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, color_func=None, axis_scaler=scale_neg_1_to_1_with_zero_mean, **kwargs):
    '''
    Produces a semiotic square visualization.

    Parameters
    ----------
    four_square : FourSquare
        The basis of the visualization
    x_label : str
        The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.
    y_label : str
        The y-axis label in the scatter plot. Relationship between the "not A" and "not B" categories.
    a_category_name : str or None
        Name of category to use. Defaults to category_a.
    b_category_name : str or None
        Name of everything that isn't in category. Defaults to category_b.
    not_a_category_name : str or None
        Name of the "not A" set of data. Defaults to the four square's not-A label.
    not_b_category_name : str or None
        Name of the "not B" set of data. Defaults to the four square's not-B label.
    num_terms_semiotic_square : int or None
        10 by default. Number of terms to show in semiotic square.
    get_tooltip_content : str or None
        Defaults to tooltip showing z-scores on both axes.
    x_axis_values : list, default None
        Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
    y_axis_values : list, default None
        Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
    color_func : str, default None
        Javascript function to control color of a point. Function takes a parameter
        which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
        returns a string. Defaults to RdYlBu on the x-axis, and varying saturation on the y-axis.
    axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean
        Scale values to fit axis
    Remaining arguments are from `produce_scattertext_explorer`.

    Returns
    -------
    str, html of visualization
    '''
    if a_category_name is None:
        a_category_name = four_square.get_labels()['a_label']
        if a_category_name is None or a_category_name == '':
            a_category_name = four_square.category_a_list_[0]
    if b_category_name is None:
        b_category_name = four_square.get_labels()['b_label']
        if b_category_name is None or b_category_name == '':
            b_category_name = four_square.category_b_list_[0]
    if not_a_category_name is None:
        not_a_category_name = four_square.get_labels()['not_a_label']
        if not_a_category_name is None or not_a_category_name == '':
            not_a_category_name = four_square.not_category_a_list_[0]
    if not_b_category_name is None:
        not_b_category_name = four_square.get_labels()['not_b_label']
        if not_b_category_name is None or not_b_category_name == '':
            not_b_category_name = four_square.not_category_b_list_[0]
    if x_label is None:
        x_label = a_category_name + '-' + b_category_name
    if y_label is None:
        y_label = not_a_category_name + '-' + not_b_category_name
    if get_tooltip_content is None:
        get_tooltip_content = ('(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})' % (x_label, y_label))
    if color_func is None:
        color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
    # dead code from the original source, kept as a comment:
    # my_scaler = scale_neg_1_to_1_with_zero_mean_abs_max
    # if foveate:
    #     my_scaler = scale_neg_1_to_1_with_zero_mean_rank_abs_max
    axes = four_square.get_axes()
    if 'scores' not in kwargs:
        kwargs['scores'] = -axes['x']
    return produce_scattertext_explorer(four_square.term_doc_matrix_, category=list(set(four_square.category_a_list_) - set(four_square.category_b_list_))[0], category_name=a_category_name, not_category_name=b_category_name, not_categories=four_square.category_b_list_, neutral_categories=four_square.not_category_a_list_, extra_categories=four_square.not_category_b_list_, sort_by_dist=False, x_coords=axis_scaler(-axes['x']), y_coords=axis_scaler(axes['y']), original_x=-axes['x'], original_y=axes['y'], show_characteristic=False, show_top_terms=False, x_label=x_label, y_label=y_label, semiotic_square=four_square, show_neutral=True, neutral_category_name=not_a_category_name, show_extra=True, extra_category_name=not_b_category_name, num_terms_semiotic_square=num_terms_semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=False, **kwargs)
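
A heavily hedged sketch: the FourSquare constructor arguments below are assumptions inferred from the attributes this function reads (category_a_list_, not_category_a_list_, and so on) and should be checked against the library; the category names are illustrative.

# constructor arguments are assumed, inferred from the attributes used above
four_square = st.FourSquare(corpus,
                            category_a_list=['democrat'],
                            category_b_list=['republican'],
                            not_category_a_list=['news'],
                            not_category_b_list=['background'])
html = st.produce_four_square_explorer(four_square)  # names default to the square's labels
open('four_square.html', 'wb').write(html.encode('utf-8'))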

64223550e6f20db44e98ca384c939517d4771af97ba8eeac05f33beaf56f2527 | scattertext/__init__.py | produce_four_square_axes_explorer | JasonKessler/scattertext | 1,823 | python

def produce_four_square_axes_explorer(four_square_axes, x_label=None, y_label=None, num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, color_func=None, axis_scaler=scale_neg_1_to_1_with_zero_mean, **kwargs):
    '''
    Produces a semiotic square visualization.

    Parameters
    ----------
    four_square_axes : FourSquareAxes
        The basis of the visualization
    x_label : str
        The x-axis label in the scatter plot. Relationship between the left and right categories.
    y_label : str
        The y-axis label in the scatter plot. Relationship between the top and bottom categories.
    num_terms_semiotic_square : int or None
        10 by default. Number of terms to show in semiotic square.
    get_tooltip_content : str or None
        Defaults to tooltip showing z-scores on both axes.
    x_axis_values : list, default None
        Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
    y_axis_values : list, default None
        Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
    color_func : str, default None
        Javascript function to control color of a point. Function takes a parameter
        which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
        returns a string. Defaults to RdYlBu on the x-axis, and varying saturation on the y-axis.
    axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean
        Scale values to fit axis
    Remaining arguments are from `produce_scattertext_explorer`.

    Returns
    -------
    str, html of visualization
    '''
    if x_label is None:
        x_label = four_square_axes.left_category_name_ + '-' + four_square_axes.right_category_name_
    if y_label is None:
        y_label = four_square_axes.top_category_name_ + '-' + four_square_axes.bottom_category_name_
    if get_tooltip_content is None:
        get_tooltip_content = ('(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})' % (x_label, y_label))
    if color_func is None:
        color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
    axes = four_square_axes.get_axes()
    if 'scores' not in kwargs:
        kwargs['scores'] = -axes['x']
    # dead code from the original source, kept as a comment:
    # my_scaler = scale_neg_1_to_1_with_zero_mean_abs_max
    # if foveate:
    #     my_scaler = scale_neg_1_to_1_with_zero_mean_rank_abs_max
    return produce_scattertext_explorer(four_square_axes.term_doc_matrix_, category=four_square_axes.left_categories_[0], category_name=four_square_axes.left_category_name_, not_categories=four_square_axes.right_categories_, not_category_name=four_square_axes.right_category_name_, neutral_categories=four_square_axes.top_categories_, neutral_category_name=four_square_axes.top_category_name_, extra_categories=four_square_axes.bottom_categories_, extra_category_name=four_square_axes.bottom_category_name_, sort_by_dist=False, x_coords=axis_scaler(-axes['x']), y_coords=axis_scaler(axes['y']), original_x=-axes['x'], original_y=axes['y'], show_characteristic=False, show_top_terms=False, x_label=x_label, y_label=y_label, semiotic_square=four_square_axes, show_neutral=True, show_extra=True, num_terms_semiotic_square=num_terms_semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=False, **kwargs)
Parameters
----------
four_square_axes : FourSquareAxes
The basis of the visualization
x_label : str
The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.
y_label : str
The y-axis label in the scatter plot. Relationship between the neutral and complex terms.
not_b_category_name: str or None
Name of neutral set of data. Defaults to "Extra".
num_terms_semiotic_square : int or None
10 by default. Number of terms to show in semiotic square.
get_tooltip_content : str or None
Defaults to tooltip showing z-scores on both axes.
x_axis_values : list, default None
Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
y_axis_values : list, default None
Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
color_func : str, default None
Javascript function to control color of a point. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string. Defaults to RdYlBu on x-axis, and varying saturation on y-axis.
axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean_abs_max
Scale values to fit axis
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization | scattertext/__init__.py | produce_four_square_axes_explorer | JasonKessler/scattertext | 1,823 | python | def produce_four_square_axes_explorer(four_square_axes, x_label=None, y_label=None, num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, color_func=None, axis_scaler=scale_neg_1_to_1_with_zero_mean, **kwargs):
'\n Produces a semiotic square visualization.\n\n Parameters\n ----------\n four_square : FourSquareAxes\n The basis of the visualization\n x_label : str\n The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.\n y_label\n The y-axis label in the scatter plot. Relationship neutral term and complex term.\n not_b_category_name: str or None\n Name of neutral set of data. Defaults to "Extra".\n num_terms_semiotic_square : int or None\n 10 by default. Number of terms to show in semiotic square.\n get_tooltip_content : str or None\n Defaults to tooltip showing z-scores on both axes.\n x_axis_values : list, default None\n Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default\n y_axis_values : list, default None\n Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default\n color_func : str, default None\n Javascript function to control color of a point. Function takes a parameter\n which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and\n returns a string. Defaults to RdYlBl on x-axis, and varying saturation on y-axis.\n axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean_abs_max\n Scale values to fit axis\n Remaining arguments are from `produce_scattertext_explorer`.\n\n Returns\n -------\n str, html of visualization\n '
if (x_label is None):
x_label = ((four_square_axes.left_category_name_ + '-') + four_square_axes.right_category_name_)
if (y_label is None):
y_label = ((four_square_axes.top_category_name_ + '-') + four_square_axes.bottom_category_name_)
if (get_tooltip_content is None):
get_tooltip_content = ('(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})' % (x_label, y_label))
if (color_func is None):
color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
axes = four_square_axes.get_axes()
if ('scores' not in kwargs):
kwargs['scores'] = (- axes['x'])
'\n my_scaler = scale_neg_1_to_1_with_zero_mean_abs_max\n if foveate:\n my_scaler = scale_neg_1_to_1_with_zero_mean_rank_abs_max\n '
return produce_scattertext_explorer(four_square_axes.term_doc_matrix_, category=four_square_axes.left_categories_[0], category_name=four_square_axes.left_category_name_, not_categories=four_square_axes.right_categories_, not_category_name=four_square_axes.right_category_name_, neutral_categories=four_square_axes.top_categories_, neutral_category_name=four_square_axes.top_category_name_, extra_categories=four_square_axes.bottom_categories_, extra_category_name=four_square_axes.bottom_category_name_, sort_by_dist=False, x_coords=axis_scaler((- axes['x'])), y_coords=axis_scaler(axes['y']), original_x=(- axes['x']), original_y=axes['y'], show_characteristic=False, show_top_terms=False, x_label=x_label, y_label=y_label, semiotic_square=four_square_axes, show_neutral=True, show_extra=True, num_terms_semiotic_square=num_terms_semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=False, **kwargs) | def produce_four_square_axes_explorer(four_square_axes, x_label=None, y_label=None, num_terms_semiotic_square=None, get_tooltip_content=None, x_axis_values=None, y_axis_values=None, color_func=None, axis_scaler=scale_neg_1_to_1_with_zero_mean, **kwargs):
'\n Produces a semiotic square visualization.\n\n Parameters\n ----------\n four_square : FourSquareAxes\n The basis of the visualization\n x_label : str\n The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.\n y_label\n The y-axis label in the scatter plot. Relationship neutral term and complex term.\n not_b_category_name: str or None\n Name of neutral set of data. Defaults to "Extra".\n num_terms_semiotic_square : int or None\n 10 by default. Number of terms to show in semiotic square.\n get_tooltip_content : str or None\n Defaults to tooltip showing z-scores on both axes.\n x_axis_values : list, default None\n Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default\n y_axis_values : list, default None\n Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default\n color_func : str, default None\n Javascript function to control color of a point. Function takes a parameter\n which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and\n returns a string. Defaults to RdYlBl on x-axis, and varying saturation on y-axis.\n axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean_abs_max\n Scale values to fit axis\n Remaining arguments are from `produce_scattertext_explorer`.\n\n Returns\n -------\n str, html of visualization\n '
if (x_label is None):
x_label = ((four_square_axes.left_category_name_ + '-') + four_square_axes.right_category_name_)
if (y_label is None):
y_label = ((four_square_axes.top_category_name_ + '-') + four_square_axes.bottom_category_name_)
if (get_tooltip_content is None):
get_tooltip_content = ('(function(d) {return d.term + "<br/>%s: " + Math.round(d.ox*1000)/1000+"<br/>%s: " + Math.round(d.oy*1000)/1000})' % (x_label, y_label))
if (color_func is None):
color_func = '(function(d) {return d3.interpolateRdYlBu(d.x)})'
axes = four_square_axes.get_axes()
if ('scores' not in kwargs):
kwargs['scores'] = (- axes['x'])
'\n my_scaler = scale_neg_1_to_1_with_zero_mean_abs_max\n if foveate:\n my_scaler = scale_neg_1_to_1_with_zero_mean_rank_abs_max\n '
return produce_scattertext_explorer(four_square_axes.term_doc_matrix_, category=four_square_axes.left_categories_[0], category_name=four_square_axes.left_category_name_, not_categories=four_square_axes.right_categories_, not_category_name=four_square_axes.right_category_name_, neutral_categories=four_square_axes.top_categories_, neutral_category_name=four_square_axes.top_category_name_, extra_categories=four_square_axes.bottom_categories_, extra_category_name=four_square_axes.bottom_category_name_, sort_by_dist=False, x_coords=axis_scaler((- axes['x'])), y_coords=axis_scaler(axes['y']), original_x=(- axes['x']), original_y=axes['y'], show_characteristic=False, show_top_terms=False, x_label=x_label, y_label=y_label, semiotic_square=four_square_axes, show_neutral=True, show_extra=True, num_terms_semiotic_square=num_terms_semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=x_axis_values, y_axis_values=y_axis_values, color_func=color_func, show_axes=False, **kwargs)<|docstring|>Produces a semiotic square visualization.
Parameters
----------
four_square_axes : FourSquareAxes
The basis of the visualization
x_label : str
The x-axis label in the scatter plot. Relationship between `category_a` and `category_b`.
y_label : str
The y-axis label in the scatter plot. Relationship between the neutral and complex terms.
not_b_category_name: str or None
Name of neutral set of data. Defaults to "Extra".
num_terms_semiotic_square : int or None
10 by default. Number of terms to show in semiotic square.
get_tooltip_content : str or None
Defaults to tooltip showing z-scores on both axes.
x_axis_values : list, default None
Value-labels to show on x-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
y_axis_values : list, default None
Value-labels to show on y-axis. [-2.58, -1.96, 0, 1.96, 2.58] is the default
color_func : str, default None
Javascript function to control color of a point. Function takes a parameter
which is a dictionary entry produced by `ScatterChartExplorer.to_dict` and
returns a string. Defaults to RdYlBu on x-axis, and varying saturation on y-axis.
axis_scaler : lambda, default scale_neg_1_to_1_with_zero_mean_abs_max
Scale values to fit axis
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization<|endoftext|> |
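A hedged usage sketch for produce_four_square_axes_explorer. The left/right/top/bottom category lists and display names are illustrative assumptions; it presumes a corpus whose categories split cleanly along two axes.

import scattertext as st

# Assumes `corpus` is an st.Corpus carrying the four illustrative category
# groups below; FourSquareAxes derives the x- and y-axis scores from them.
four_square_axes = st.FourSquareAxes(corpus,
                                     left_categories=['democrat'],
                                     right_categories=['republican'],
                                     top_categories=['2016'],
                                     bottom_categories=['2012'],
                                     left_category_name='Democratic',
                                     right_category_name='Republican',
                                     top_category_name='2016',
                                     bottom_category_name='2012')
html = st.produce_four_square_axes_explorer(four_square_axes)
open('four_square_axes.html', 'w').write(html)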
c67037e1966c0df8a7a863cf0cd1a9649ef9969c3d20b060e3462e47e63965a1 | def produce_projection_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, term_acceptance_re=re.compile('[a-z]{3,}'), show_axes=False, **kwargs):
"\n Parameters\n ----------\n corpus : ParsedCorpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n word2vec_model : Word2Vec\n A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default\n model.\n projection_model : sklearn-style dimensionality reduction model.\n By default: umap.UMAP(min_dist=0.5, metric='cosine')\n You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)\n embeddings : array[len(corpus.get_terms()), X]\n Word embeddings. If None (default), will train them using word2vec Model\n term_acceptance_re : SRE_Pattern,\n Regular expression to identify valid terms\n show_axes : bool, default False\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n kwargs : dict\n Remaining produce_scattertext_explorer keywords get_tooltip_content\n\n Returns\n -------\n str\n HTML of visualization\n\n "
embeddings_resolover = EmbeddingsResolver(corpus)
if (embeddings is not None):
embeddings_resolover.set_embeddings(embeddings)
else:
embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re)
(corpus, word_axes) = embeddings_resolover.project_embeddings(projection_model, x_dim=0, y_dim=1)
html = produce_scattertext_explorer(corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, x_coords=scale(word_axes['x']), y_coords=scale(word_axes['y']), y_label='', x_label='', show_axes=show_axes, **kwargs)
return html | Parameters
----------
corpus : ParsedCorpus
It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`
category : str
word2vec_model : Word2Vec
A gensim word2vec model. If None, a default model is trained; see Word2VecFromParsedCorpus for the default
model.
projection_model : sklearn-style dimensionality reduction model.
By default: umap.UMAP(min_dist=0.5, metric='cosine')
You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)
embeddings : array[len(corpus.get_terms()), X]
Word embeddings. If None (default), they are trained using the word2vec model.
term_acceptance_re : SRE_Pattern,
Regular expression to identify valid terms
show_axes : bool, default False
Show the ticked axes on the plot. If false, show inner axes as a crosshair.
kwargs : dict
Remaining produce_scattertext_explorer keywords, e.g., get_tooltip_content
Returns
-------
str
HTML of visualization | scattertext/__init__.py | produce_projection_explorer | JasonKessler/scattertext | 1,823 | python | def produce_projection_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, term_acceptance_re=re.compile('[a-z]{3,}'), show_axes=False, **kwargs):
"\n Parameters\n ----------\n corpus : ParsedCorpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n word2vec_model : Word2Vec\n A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default\n model.\n projection_model : sklearn-style dimensionality reduction model.\n By default: umap.UMAP(min_dist=0.5, metric='cosine')\n You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)\n embeddings : array[len(corpus.get_terms()), X]\n Word embeddings. If None (default), will train them using word2vec Model\n term_acceptance_re : SRE_Pattern,\n Regular expression to identify valid terms\n show_axes : bool, default False\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n kwargs : dict\n Remaining produce_scattertext_explorer keywords get_tooltip_content\n\n Returns\n -------\n str\n HTML of visualization\n\n "
embeddings_resolover = EmbeddingsResolver(corpus)
if (embeddings is not None):
embeddings_resolover.set_embeddings(embeddings)
else:
embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re)
(corpus, word_axes) = embeddings_resolover.project_embeddings(projection_model, x_dim=0, y_dim=1)
html = produce_scattertext_explorer(corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, x_coords=scale(word_axes['x']), y_coords=scale(word_axes['y']), y_label='', x_label='', show_axes=show_axes, **kwargs)
return html | def produce_projection_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, term_acceptance_re=re.compile('[a-z]{3,}'), show_axes=False, **kwargs):
"\n Parameters\n ----------\n corpus : ParsedCorpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n word2vec_model : Word2Vec\n A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default\n model.\n projection_model : sklearn-style dimensionality reduction model.\n By default: umap.UMAP(min_dist=0.5, metric='cosine')\n You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)\n embeddings : array[len(corpus.get_terms()), X]\n Word embeddings. If None (default), will train them using word2vec Model\n term_acceptance_re : SRE_Pattern,\n Regular expression to identify valid terms\n show_axes : bool, default False\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n kwargs : dict\n Remaining produce_scattertext_explorer keywords get_tooltip_content\n\n Returns\n -------\n str\n HTML of visualization\n\n "
embeddings_resolover = EmbeddingsResolver(corpus)
if (embeddings is not None):
embeddings_resolover.set_embeddings(embeddings)
else:
embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re)
(corpus, word_axes) = embeddings_resolover.project_embeddings(projection_model, x_dim=0, y_dim=1)
html = produce_scattertext_explorer(corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, x_coords=scale(word_axes['x']), y_coords=scale(word_axes['y']), y_label='', x_label='', show_axes=show_axes, **kwargs)
return html<|docstring|>Parameters
----------
corpus : ParsedCorpus
It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`
category : str
word2vec_model : Word2Vec
A gensim word2vec model. If None, a default model is trained; see Word2VecFromParsedCorpus for the default
model.
projection_model : sklearn-style dimensionality reduction model.
By default: umap.UMAP(min_dist=0.5, metric='cosine')
You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)
embeddings : array[len(corpus.get_terms()), X]
Word embeddings. If None (default), they are trained using the word2vec model.
term_acceptance_re : SRE_Pattern,
Regular expression to identify valid terms
show_axes : bool, default False
Show the ticked axes on the plot. If false, show inner axes as a crosshair.
kwargs : dict
Remaining produce_scattertext_explorer keywords, e.g., get_tooltip_content
Returns
-------
str
HTML of visualization<|endoftext|> |
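A sketch of produce_projection_explorer along the lines of scattertext's word-embedding demo. It assumes gensim is installed (so the default word2vec model can be trained) and umap-learn for the default projection; passing projection_model=sklearn.manifold.TSNE(...) avoids the umap dependency.

import scattertext as st

# The 2012 convention sample corpus ships with scattertext.
convention_df = st.SampleCorpora.ConventionData2012.get_data()
corpus = (st.CorpusFromPandas(convention_df, category_col='party', text_col='text',
                              nlp=st.whitespace_nlp_with_sentences)
          .build()
          .get_stoplisted_unigram_corpus())
html = st.produce_projection_explorer(corpus,
                                      category='democrat',
                                      category_name='Democratic',
                                      not_category_name='Republican')
open('projection.html', 'w').write(html)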
d4685dd38f0716bab30e6f9d3c748627a6c4de990d95572a25b06ace4bd14935 | def produce_pca_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, projection=None, term_acceptance_re=re.compile('[a-z]{3,}'), x_dim=0, y_dim=1, scaler=scale, show_axes=False, show_dimensions_on_tooltip=True, x_label='', y_label='', **kwargs):
"\n Parameters\n ----------\n corpus : ParsedCorpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n word2vec_model : Word2Vec\n A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default\n model.\n projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is presents\n By default: umap.UMAP(min_dist=0.5, metric='cosine') unless projection is present. If so,\n You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)\n embeddings : array[len(corpus.get_terms()), X]\n Word embeddings. If None (default), and no value is passed into projection, use word2vec_model\n projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())])\n If None (default), produced using projection_model\n term_acceptance_re : SRE_Pattern,\n Regular expression to identify valid terms\n x_dim : int, default 0\n Dimension of transformation matrix for x-axis\n y_dim : int, default 1\n Dimension of transformation matrix for y-axis\n scalers : function , default scattertext.Scalers.scale\n Function used to scale projection\n show_axes : bool, default False\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n show_dimensions_on_tooltip : bool, False by default\n If true, shows dimension positions on tooltip, along with term name. Otherwise, default to the\n get_tooltip_content parameter.\n kwargs : dict\n Remaining produce_scattertext_explorer keywords get_tooltip_content\n\n Returns\n -------\n str\n HTML of visualization\n "
if (projection is None):
embeddings_resolover = EmbeddingsResolver(corpus)
if (embeddings is not None):
embeddings_resolover.set_embeddings(embeddings)
else:
embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re)
(corpus, projection) = embeddings_resolover.project_embeddings(projection_model, x_dim=x_dim, y_dim=y_dim)
else:
assert (type(projection) == pd.DataFrame)
assert (('x' in projection) and ('y' in projection))
if kwargs.get('use_non_text_features', False):
assert (set(projection.index) == set(corpus.get_metadata()))
else:
assert (set(projection.index) == set(corpus.get_terms()))
if show_dimensions_on_tooltip:
kwargs['get_tooltip_content'] = ('(function(d) {\n return d.term + "<br/>Dim %s: " + Math.round(d.ox*1000)/1000 + "<br/>Dim %s: " + Math.round(d.oy*1000)/1000 \n })' % (x_dim, y_dim))
html = produce_scattertext_explorer(corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, original_x=projection['x'], original_y=projection['y'], x_coords=scaler(projection['x']), y_coords=scaler(projection['y']), y_label=y_label, x_label=x_label, show_axes=show_axes, horizontal_line_y_position=kwargs.get('horizontal_line_y_position', 0), vertical_line_x_position=kwargs.get('vertical_line_x_position', 0), **kwargs)
return html | Parameters
----------
corpus : ParsedCorpus
It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`
category : str
word2vec_model : Word2Vec
A gensim word2vec model. If None, a default model is trained; see Word2VecFromParsedCorpus for the default
model.
projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is present.
By default: umap.UMAP(min_dist=0.5, metric='cosine').
You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)
embeddings : array[len(corpus.get_terms()), X]
Word embeddings. If None (default), and no value is passed into projection, use word2vec_model
projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())])
If None (default), produced using projection_model
term_acceptance_re : SRE_Pattern,
Regular expression to identify valid terms
x_dim : int, default 0
Dimension of transformation matrix for x-axis
y_dim : int, default 1
Dimension of transformation matrix for y-axis
scaler : function, default scattertext.Scalers.scale
Function used to scale projection
show_axes : bool, default False
Show the ticked axes on the plot. If false, show inner axes as a crosshair.
show_dimensions_on_tooltip : bool, True by default
If True, shows dimension positions on the tooltip, along with the term name. Otherwise, defaults to the
get_tooltip_content parameter.
kwargs : dict
Remaining produce_scattertext_explorer keywords, e.g., get_tooltip_content
Returns
-------
str
HTML of visualization | scattertext/__init__.py | produce_pca_explorer | JasonKessler/scattertext | 1,823 | python | def produce_pca_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, projection=None, term_acceptance_re=re.compile('[a-z]{3,}'), x_dim=0, y_dim=1, scaler=scale, show_axes=False, show_dimensions_on_tooltip=True, x_label=, y_label=, **kwargs):
"\n Parameters\n ----------\n corpus : ParsedCorpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n word2vec_model : Word2Vec\n A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default\n model.\n projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is presents\n By default: umap.UMAP(min_dist=0.5, metric='cosine') unless projection is present. If so,\n You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)\n embeddings : array[len(corpus.get_terms()), X]\n Word embeddings. If None (default), and no value is passed into projection, use word2vec_model\n projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())])\n If None (default), produced using projection_model\n term_acceptance_re : SRE_Pattern,\n Regular expression to identify valid terms\n x_dim : int, default 0\n Dimension of transformation matrix for x-axis\n y_dim : int, default 1\n Dimension of transformation matrix for y-axis\n scalers : function , default scattertext.Scalers.scale\n Function used to scale projection\n show_axes : bool, default False\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n show_dimensions_on_tooltip : bool, False by default\n If true, shows dimension positions on tooltip, along with term name. Otherwise, default to the\n get_tooltip_content parameter.\n kwargs : dict\n Remaining produce_scattertext_explorer keywords get_tooltip_content\n\n Returns\n -------\n str\n HTML of visualization\n "
if (projection is None):
embeddings_resolover = EmbeddingsResolver(corpus)
if (embeddings is not None):
embeddings_resolover.set_embeddings(embeddings)
else:
embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re)
(corpus, projection) = embeddings_resolover.project_embeddings(projection_model, x_dim=x_dim, y_dim=y_dim)
else:
assert (type(projection) == pd.DataFrame)
assert (('x' in projection) and ('y' in projection))
if kwargs.get('use_non_text_features', False):
assert (set(projection.index) == set(corpus.get_metadata()))
else:
assert (set(projection.index) == set(corpus.get_terms()))
if show_dimensions_on_tooltip:
kwargs['get_tooltip_content'] = ('(function(d) {\n return d.term + "<br/>Dim %s: " + Math.round(d.ox*1000)/1000 + "<br/>Dim %s: " + Math.round(d.oy*1000)/1000 \n })' % (x_dim, y_dim))
html = produce_scattertext_explorer(corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, original_x=projection['x'], original_y=projection['y'], x_coords=scaler(projection['x']), y_coords=scaler(projection['y']), y_label=y_label, x_label=x_label, show_axes=show_axes, horizontal_line_y_position=kwargs.get('horizontal_line_y_position', 0), vertical_line_x_position=kwargs.get('vertical_line_x_position', 0), **kwargs)
return html | def produce_pca_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, projection=None, term_acceptance_re=re.compile('[a-z]{3,}'), x_dim=0, y_dim=1, scaler=scale, show_axes=False, show_dimensions_on_tooltip=True, x_label=, y_label=, **kwargs):
"\n Parameters\n ----------\n corpus : ParsedCorpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n word2vec_model : Word2Vec\n A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default\n model.\n projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is presents\n By default: umap.UMAP(min_dist=0.5, metric='cosine') unless projection is present. If so,\n You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)\n embeddings : array[len(corpus.get_terms()), X]\n Word embeddings. If None (default), and no value is passed into projection, use word2vec_model\n projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())])\n If None (default), produced using projection_model\n term_acceptance_re : SRE_Pattern,\n Regular expression to identify valid terms\n x_dim : int, default 0\n Dimension of transformation matrix for x-axis\n y_dim : int, default 1\n Dimension of transformation matrix for y-axis\n scalers : function , default scattertext.Scalers.scale\n Function used to scale projection\n show_axes : bool, default False\n Show the ticked axes on the plot. If false, show inner axes as a crosshair.\n show_dimensions_on_tooltip : bool, False by default\n If true, shows dimension positions on tooltip, along with term name. Otherwise, default to the\n get_tooltip_content parameter.\n kwargs : dict\n Remaining produce_scattertext_explorer keywords get_tooltip_content\n\n Returns\n -------\n str\n HTML of visualization\n "
if (projection is None):
embeddings_resolover = EmbeddingsResolver(corpus)
if (embeddings is not None):
embeddings_resolover.set_embeddings(embeddings)
else:
embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re)
(corpus, projection) = embeddings_resolover.project_embeddings(projection_model, x_dim=x_dim, y_dim=y_dim)
else:
assert (type(projection) == pd.DataFrame)
assert (('x' in projection) and ('y' in projection))
if kwargs.get('use_non_text_features', False):
assert (set(projection.index) == set(corpus.get_metadata()))
else:
assert (set(projection.index) == set(corpus.get_terms()))
if show_dimensions_on_tooltip:
kwargs['get_tooltip_content'] = ('(function(d) {\n return d.term + "<br/>Dim %s: " + Math.round(d.ox*1000)/1000 + "<br/>Dim %s: " + Math.round(d.oy*1000)/1000 \n })' % (x_dim, y_dim))
html = produce_scattertext_explorer(corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, original_x=projection['x'], original_y=projection['y'], x_coords=scaler(projection['x']), y_coords=scaler(projection['y']), y_label=y_label, x_label=x_label, show_axes=show_axes, horizontal_line_y_position=kwargs.get('horizontal_line_y_position', 0), vertical_line_x_position=kwargs.get('vertical_line_x_position', 0), **kwargs)
return html<|docstring|>Parameters
----------
corpus : ParsedCorpus
It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`
category : str
word2vec_model : Word2Vec
A gensim word2vec model. If None, a default model is trained; see Word2VecFromParsedCorpus for the default
model.
projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is present.
By default: umap.UMAP(min_dist=0.5, metric='cosine').
You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23)
embeddings : array[len(corpus.get_terms()), X]
Word embeddings. If None (default), and no value is passed into projection, use word2vec_model
projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())])
If None (default), produced using projection_model
term_acceptance_re : SRE_Pattern,
Regular expression to identify valid terms
x_dim : int, default 0
Dimension of transformation matrix for x-axis
y_dim : int, default 1
Dimension of transformation matrix for y-axis
scaler : function, default scattertext.Scalers.scale
Function used to scale projection
show_axes : bool, default False
Show the ticked axes on the plot. If false, show inner axes as a crosshair.
show_dimensions_on_tooltip : bool, True by default
If True, shows dimension positions on the tooltip, along with the term name. Otherwise, defaults to the
get_tooltip_content parameter.
kwargs : dict
Remaining produce_scattertext_explorer keywords, e.g., get_tooltip_content
Returns
-------
str
HTML of visualization<|endoftext|> |
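A hedged sketch of the precomputed-projection path through produce_pca_explorer: build tf-idf weights over the term-document matrix, take the first two SVD components per term, and pass them in as the `projection` DataFrame, which must be indexed by the corpus's terms.

import pandas as pd
import scattertext as st
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer

# Assumes `corpus` is a stoplisted unigram st.Corpus, as in the sketch above.
embeddings = TfidfTransformer().fit_transform(corpus.get_term_doc_mat()).T  # terms x docs
axes = TruncatedSVD(n_components=2).fit_transform(embeddings)
projection = pd.DataFrame({'term': corpus.get_terms(),
                           'x': axes.T[0],
                           'y': axes.T[1]}).set_index('term')
html = st.produce_pca_explorer(corpus,
                               category='democrat',
                               category_name='Democratic',
                               not_category_name='Republican',
                               projection=projection)
open('pca.html', 'w').write(html)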
e2021d9654767be892baf9b5aab4976b7e22f63367a8286c0ef3fa69dc00ac8e | def produce_characteristic_explorer(corpus, category, category_name=None, not_category_name=None, not_categories=None, characteristic_scorer=DenseRankCharacteristicness(), term_ranker=termranking.AbsoluteFrequencyRanker, term_scorer=RankDifference(), x_label='Characteristic to Corpus', y_label=None, y_axis_labels=None, scores=None, vertical_lines=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n category_name : str\n not_category_name : str\n not_categories : list\n characteristic_scorer : CharacteristicScorer\n term_ranker\n term_scorer\n term_acceptance_re : SRE_Pattern\n Regular expression to identify valid terms\n kwargs : dict\n remaining produce_scattertext_explorer keywords\n\n Returns\n -------\n str HTML of visualization\n\n '
if (not_categories is None):
not_categories = [c for c in corpus.get_categories() if (c != category)]
(category_name, not_category_name) = get_category_names(category, category_name, not_categories, not_category_name)
(zero_point, characteristic_scores) = characteristic_scorer.get_scores(corpus)
corpus = corpus.remove_terms((set(corpus.get_terms()) - set(characteristic_scores.index)))
characteristic_scores = characteristic_scores.loc[corpus.get_terms()]
term_freq_df = term_ranker(corpus).get_ranks()
scores = (term_scorer.get_scores(term_freq_df[(category + ' freq')], term_freq_df[[(c + ' freq') for c in not_categories]].sum(axis=1)) if (scores is None) else scores)
scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(scores)
html = produce_scattertext_explorer(corpus=corpus, category=category, category_name=category_name, not_category_name=not_category_name, not_categories=not_categories, minimum_term_frequency=0, sort_by_dist=False, x_coords=characteristic_scores, y_coords=scores_scaled_for_charting, y_axis_labels=([('More ' + not_category_name), 'Even', ('More ' + category_name)] if (y_axis_labels is None) else y_axis_labels), x_label=x_label, y_label=(term_scorer.get_name() if (y_label is None) else y_label), vertical_lines=([] if (vertical_lines is None) else vertical_lines), characteristic_scorer=characteristic_scorer, **kwargs)
return html | Parameters
----------
corpus : Corpus
It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`
category : str
category_name : str
not_category_name : str
not_categories : list
characteristic_scorer : CharacteristicScorer
term_ranker : TermRanker, default termranking.AbsoluteFrequencyRanker
term_scorer : scorer with a get_scores method, default RankDifference()
term_acceptance_re : SRE_Pattern
Regular expression to identify valid terms
kwargs : dict
remaining produce_scattertext_explorer keywords
Returns
-------
str HTML of visualization | scattertext/__init__.py | produce_characteristic_explorer | JasonKessler/scattertext | 1,823 | python | def produce_characteristic_explorer(corpus, category, category_name=None, not_category_name=None, not_categories=None, characteristic_scorer=DenseRankCharacteristicness(), term_ranker=termranking.AbsoluteFrequencyRanker, term_scorer=RankDifference(), x_label='Characteristic to Corpus', y_label=None, y_axis_labels=None, scores=None, vertical_lines=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n category_name : str\n not_category_name : str\n not_categories : list\n characteristic_scorer : CharacteristicScorer\n term_ranker\n term_scorer\n term_acceptance_re : SRE_Pattern\n Regular expression to identify valid terms\n kwargs : dict\n remaining produce_scattertext_explorer keywords\n\n Returns\n -------\n str HTML of visualization\n\n '
if (not_categories is None):
not_categories = [c for c in corpus.get_categories() if (c != category)]
(category_name, not_category_name) = get_category_names(category, category_name, not_categories, not_category_name)
(zero_point, characteristic_scores) = characteristic_scorer.get_scores(corpus)
corpus = corpus.remove_terms((set(corpus.get_terms()) - set(characteristic_scores.index)))
characteristic_scores = characteristic_scores.loc[corpus.get_terms()]
term_freq_df = term_ranker(corpus).get_ranks()
scores = (term_scorer.get_scores(term_freq_df[(category + ' freq')], term_freq_df[[(c + ' freq') for c in not_categories]].sum(axis=1)) if (scores is None) else scores)
scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(scores)
html = produce_scattertext_explorer(corpus=corpus, category=category, category_name=category_name, not_category_name=not_category_name, not_categories=not_categories, minimum_term_frequency=0, sort_by_dist=False, x_coords=characteristic_scores, y_coords=scores_scaled_for_charting, y_axis_labels=([('More ' + not_category_name), 'Even', ('More ' + category_name)] if (y_axis_labels is None) else y_axis_labels), x_label=x_label, y_label=(term_scorer.get_name() if (y_label is None) else y_label), vertical_lines=([] if (vertical_lines is None) else vertical_lines), characteristic_scorer=characteristic_scorer, **kwargs)
return html | def produce_characteristic_explorer(corpus, category, category_name=None, not_category_name=None, not_categories=None, characteristic_scorer=DenseRankCharacteristicness(), term_ranker=termranking.AbsoluteFrequencyRanker, term_scorer=RankDifference(), x_label='Characteristic to Corpus', y_label=None, y_axis_labels=None, scores=None, vertical_lines=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`\n category : str\n category_name : str\n not_category_name : str\n not_categories : list\n characteristic_scorer : CharacteristicScorer\n term_ranker\n term_scorer\n term_acceptance_re : SRE_Pattern\n Regular expression to identify valid terms\n kwargs : dict\n remaining produce_scattertext_explorer keywords\n\n Returns\n -------\n str HTML of visualization\n\n '
if (not_categories is None):
not_categories = [c for c in corpus.get_categories() if (c != category)]
(category_name, not_category_name) = get_category_names(category, category_name, not_categories, not_category_name)
(zero_point, characteristic_scores) = characteristic_scorer.get_scores(corpus)
corpus = corpus.remove_terms((set(corpus.get_terms()) - set(characteristic_scores.index)))
characteristic_scores = characteristic_scores.loc[corpus.get_terms()]
term_freq_df = term_ranker(corpus).get_ranks()
scores = (term_scorer.get_scores(term_freq_df[(category + ' freq')], term_freq_df[[(c + ' freq') for c in not_categories]].sum(axis=1)) if (scores is None) else scores)
scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(scores)
html = produce_scattertext_explorer(corpus=corpus, category=category, category_name=category_name, not_category_name=not_category_name, not_categories=not_categories, minimum_term_frequency=0, sort_by_dist=False, x_coords=characteristic_scores, y_coords=scores_scaled_for_charting, y_axis_labels=([('More ' + not_category_name), 'Even', ('More ' + category_name)] if (y_axis_labels is None) else y_axis_labels), x_label=x_label, y_label=(term_scorer.get_name() if (y_label is None) else y_label), vertical_lines=([] if (vertical_lines is None) else vertical_lines), characteristic_scorer=characteristic_scorer, **kwargs)
return html<|docstring|>Parameters
----------
corpus : Corpus
It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()`
category : str
category_name : str
not_category_name : str
not_categories : list
characteristic_scorer : CharacteristicScorer
term_ranker : TermRanker, default termranking.AbsoluteFrequencyRanker
term_scorer : scorer with a get_scores method, default RankDifference()
term_acceptance_re : SRE_Pattern
Regular expression to identify valid terms
kwargs : dict
remaining produce_scattertext_explorer keywords
Returns
-------
str HTML of visualization<|endoftext|> |
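A short sketch of produce_characteristic_explorer with the default DenseRankCharacteristicness scorer; the corpus and category names are illustrative, matching the convention-speech examples above.

import scattertext as st

# Assumes `corpus` is a stoplisted unigram st.Corpus with 'democrat' and
# 'republican' categories.
html = st.produce_characteristic_explorer(corpus,
                                          category='democrat',
                                          category_name='Democratic',
                                          not_category_name='Republican')
open('characteristic.html', 'w').write(html)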
2cd27fe9bf6f230e49845ac0136b6f22496fa71f5bde5de1b313855975c32bd7 | def sparse_explorer(corpus, category, scores, category_name=None, not_category_name=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n scores : np.array\n Scores to display in visualization. Zero scores are grey.\n\n Remaining arguments are from `produce_scattertext_explorer`.\n\n Returns\n -------\n str, html of visualization\n '
return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, gray_zero_scores=True, **kwargs) | Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
scores : np.array
Scores to display in visualization. Zero scores are grey.
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization | scattertext/__init__.py | sparse_explorer | JasonKessler/scattertext | 1,823 | python | def sparse_explorer(corpus, category, scores, category_name=None, not_category_name=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n scores : np.array\n Scores to display in visualization. Zero scores are grey.\n\n Remaining arguments are from `produce_scattertext_explorer`.\n\n Returns\n -------\n str, html of visualization\n '
return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, gray_zero_scores=True, **kwargs) | def sparse_explorer(corpus, category, scores, category_name=None, not_category_name=None, **kwargs):
'\n Parameters\n ----------\n corpus : Corpus\n Corpus to use.\n category : str\n Name of category column as it appears in original data frame.\n category_name : str\n Name of category to use. E.g., "5-star reviews."\n not_category_name : str\n Name of everything that isn\'t in category. E.g., "Below 5-star reviews".\n scores : np.array\n Scores to display in visualization. Zero scores are grey.\n\n Remaining arguments are from `produce_scattertext_explorer`.\n\n Returns\n -------\n str, html of visualization\n '
return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, gray_zero_scores=True, **kwargs)<|docstring|>Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
scores : np.array
Scores to display in visualization. Zero scores are grey.
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization<|endoftext|> |
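A hedged sketch of sparse_explorer: score terms with an L1-penalized regression so most coefficients are exactly zero and get greyed out, as the docstring describes. The alpha value, and the assumption that document order in the corpus matches the source DataFrame, are illustrative.

import scattertext as st
from sklearn.linear_model import Lasso

# `corpus` and `convention_df` as in the sketches above; rows are assumed to
# align between the two.
y = (convention_df.party == 'democrat').astype(int).values
clf = Lasso(alpha=0.01).fit(corpus.get_term_doc_mat(), y)
html = st.sparse_explorer(corpus,
                          category='democrat',
                          category_name='Democratic',
                          not_category_name='Republican',
                          scores=clf.coef_)
open('sparse.html', 'w').write(html)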
06d36e8ad97bf554053debc968bf6f74320149612cf99df61dec0d7fe01d4d38 | def produce_two_axis_plot(corpus, x_score_df, y_score_df, x_label, y_label, statistic_column='cohens_d', p_value_column='cohens_d_p', statistic_name='d', use_non_text_features=False, pick_color=pick_color, axis_scaler=scale_neg_1_to_1_with_zero_mean, distance_measure=EuclideanDistance, semiotic_square_labels=None, x_tooltip_label=None, y_tooltip_label=None, **kwargs):
'\n\n :param corpus: Corpus\n :param x_score_df: pd.DataFrame, contains effect_size_column, p_value_column. outputted by CohensD\n :param y_score_df: pd.DataFrame, contains effect_size_column, p_value_column. outputted by CohensD\n :param x_label: str\n :param y_label: str\n :param statistic_column: str, column in x_score_df, y_score_df giving statistics, default cohens_d\n :param p_value_column: str, column in x_score_df, y_score_df giving effect sizes, default cohens_d_p\n :param statistic_name: str, column which corresponds to statistic name, defauld d\n :param use_non_text_features: bool, default True\n :param pick_color: func, returns color, default is pick_color\n :param axis_scaler: func, scaler default is scale_neg_1_to_1_with_zero_mean\n :param distance_measure: DistanceMeasureBase, default EuclideanDistance\n This is how parts of the square are populated\n :param semiotic_square_labels: dict, semiotic square position labels\n :param x_tooltip_label: str, if None, x_label\n :param y_tooltip_label: str, if None, y_label\n :param kwargs: dict, other arguments\n :return: str, html\n '
if use_non_text_features:
terms = corpus.get_metadata()
else:
terms = corpus.get_terms()
axes = pd.DataFrame({'x': x_score_df[statistic_column], 'y': y_score_df[statistic_column]}).loc[terms]
merged_scores = pd.merge(x_score_df, y_score_df, left_index=True, right_index=True).loc[terms]
x_tooltip_label = (x_label if (x_tooltip_label is None) else x_tooltip_label)
y_tooltip_label = (y_label if (y_tooltip_label is None) else y_tooltip_label)
def generate_term_metadata(term_struct):
if ((p_value_column + '_corr_x') in term_struct):
x_p = term_struct[(p_value_column + '_corr_x')]
elif ((p_value_column + '_x') in term_struct):
x_p = term_struct[(p_value_column + '_x')]
else:
x_p = None
if ((p_value_column + '_corr_y') in term_struct):
y_p = term_struct[(p_value_column + '_corr_y')]
elif ((p_value_column + '_y') in term_struct):
y_p = term_struct[(p_value_column + '_y')]
else:
y_p = None
if (x_p is not None):
x_p = min(x_p, (1.0 - x_p))
if (y_p is not None):
y_p = min(y_p, (1.0 - y_p))
x_d = term_struct[(statistic_column + '_x')]
y_d = term_struct[(statistic_column + '_y')]
tooltip = ('%s: %s: %0.3f' % (x_tooltip_label, statistic_name, x_d))
if (x_p is not None):
tooltip += ('; p: %0.4f' % x_p)
tooltip += '<br/>'
tooltip += ('%s: %s: %0.3f' % (y_tooltip_label, statistic_name, y_d))
if (y_p is not None):
tooltip += ('; p: %0.4f' % y_p)
return {'tooltip': tooltip, 'color': pick_color(x_p, y_p, np.abs(x_d), np.abs(y_d))}
explanations = merged_scores.apply(generate_term_metadata, axis=1)
semiotic_square = SemioticSquareFromAxes(corpus, axes, x_axis_name=x_label, y_axis_name=y_label, labels=semiotic_square_labels, distance_measure=distance_measure)
get_tooltip_content = kwargs.get('get_tooltip_content', '(function(d) {return d.term + "<br/> " + d.etc.tooltip})')
color_func = kwargs.get('color_func', '(function(d) {return d.etc.color})')
html = produce_scattertext_explorer(corpus, category=corpus.get_categories()[0], sort_by_dist=False, x_coords=axis_scaler(axes['x']), y_coords=axis_scaler(axes['y']), original_x=axes['x'], original_y=axes['y'], show_characteristic=False, show_top_terms=False, show_category_headings=True, x_label=x_label, y_label=y_label, semiotic_square=semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=None, y_axis_values=None, unified_context=True, color_func=color_func, show_axes=False, term_metadata=explanations.to_dict(), use_non_text_features=use_non_text_features, **kwargs)
return html | :param corpus: Corpus
:param x_score_df: pd.DataFrame, contains statistic_column and p_value_column, as output by CohensD
:param y_score_df: pd.DataFrame, contains statistic_column and p_value_column, as output by CohensD
:param x_label: str
:param y_label: str
:param statistic_column: str, column in x_score_df, y_score_df giving statistics, default cohens_d
:param p_value_column: str, column in x_score_df, y_score_df giving p-values, default cohens_d_p
:param statistic_name: str, column which corresponds to statistic name, default d
:param use_non_text_features: bool, default False
:param pick_color: func, returns color, default is pick_color
:param axis_scaler: func, scaler default is scale_neg_1_to_1_with_zero_mean
:param distance_measure: DistanceMeasureBase, default EuclideanDistance
This is how parts of the square are populated
:param semiotic_square_labels: dict, semiotic square position labels
:param x_tooltip_label: str, if None, x_label
:param y_tooltip_label: str, if None, y_label
:param kwargs: dict, other arguments
:return: str, html | scattertext/__init__.py | produce_two_axis_plot | JasonKessler/scattertext | 1,823 | python | def produce_two_axis_plot(corpus, x_score_df, y_score_df, x_label, y_label, statistic_column='cohens_d', p_value_column='cohens_d_p', statistic_name='d', use_non_text_features=False, pick_color=pick_color, axis_scaler=scale_neg_1_to_1_with_zero_mean, distance_measure=EuclideanDistance, semiotic_square_labels=None, x_tooltip_label=None, y_tooltip_label=None, **kwargs):
'\n\n :param corpus: Corpus\n :param x_score_df: pd.DataFrame, contains effect_size_column, p_value_column. outputted by CohensD\n :param y_score_df: pd.DataFrame, contains effect_size_column, p_value_column. outputted by CohensD\n :param x_label: str\n :param y_label: str\n :param statistic_column: str, column in x_score_df, y_score_df giving statistics, default cohens_d\n :param p_value_column: str, column in x_score_df, y_score_df giving effect sizes, default cohens_d_p\n :param statistic_name: str, column which corresponds to statistic name, defauld d\n :param use_non_text_features: bool, default True\n :param pick_color: func, returns color, default is pick_color\n :param axis_scaler: func, scaler default is scale_neg_1_to_1_with_zero_mean\n :param distance_measure: DistanceMeasureBase, default EuclideanDistance\n This is how parts of the square are populated\n :param semiotic_square_labels: dict, semiotic square position labels\n :param x_tooltip_label: str, if None, x_label\n :param y_tooltip_label: str, if None, y_label\n :param kwargs: dict, other arguments\n :return: str, html\n '
if use_non_text_features:
terms = corpus.get_metadata()
else:
terms = corpus.get_terms()
axes = pd.DataFrame({'x': x_score_df[statistic_column], 'y': y_score_df[statistic_column]}).loc[terms]
merged_scores = pd.merge(x_score_df, y_score_df, left_index=True, right_index=True).loc[terms]
x_tooltip_label = (x_label if (x_tooltip_label is None) else x_tooltip_label)
y_tooltip_label = (y_label if (y_tooltip_label is None) else y_tooltip_label)
def generate_term_metadata(term_struct):
if ((p_value_column + '_corr_x') in term_struct):
x_p = term_struct[(p_value_column + '_corr_x')]
elif ((p_value_column + '_x') in term_struct):
x_p = term_struct[(p_value_column + '_x')]
else:
x_p = None
if ((p_value_column + '_corr_y') in term_struct):
y_p = term_struct[(p_value_column + '_corr_y')]
elif ((p_value_column + '_y') in term_struct):
y_p = term_struct[(p_value_column + '_y')]
else:
y_p = None
if (x_p is not None):
x_p = min(x_p, (1.0 - x_p))
if (y_p is not None):
y_p = min(y_p, (1.0 - y_p))
x_d = term_struct[(statistic_column + '_x')]
y_d = term_struct[(statistic_column + '_y')]
tooltip = ('%s: %s: %0.3f' % (x_tooltip_label, statistic_name, x_d))
if (x_p is not None):
tooltip += ('; p: %0.4f' % x_p)
tooltip += '<br/>'
tooltip += ('%s: %s: %0.3f' % (y_tooltip_label, statistic_name, y_d))
if (y_p is not None):
tooltip += ('; p: %0.4f' % y_p)
return {'tooltip': tooltip, 'color': pick_color(x_p, y_p, np.abs(x_d), np.abs(y_d))}
explanations = merged_scores.apply(generate_term_metadata, axis=1)
semiotic_square = SemioticSquareFromAxes(corpus, axes, x_axis_name=x_label, y_axis_name=y_label, labels=semiotic_square_labels, distance_measure=distance_measure)
get_tooltip_content = kwargs.get('get_tooltip_content', '(function(d) {return d.term + "<br/> " + d.etc.tooltip})')
color_func = kwargs.get('color_func', '(function(d) {return d.etc.color})')
html = produce_scattertext_explorer(corpus, category=corpus.get_categories()[0], sort_by_dist=False, x_coords=axis_scaler(axes['x']), y_coords=axis_scaler(axes['y']), original_x=axes['x'], original_y=axes['y'], show_characteristic=False, show_top_terms=False, show_category_headings=True, x_label=x_label, y_label=y_label, semiotic_square=semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=None, y_axis_values=None, unified_context=True, color_func=color_func, show_axes=False, term_metadata=explanations.to_dict(), use_non_text_features=use_non_text_features, **kwargs)
return html | def produce_two_axis_plot(corpus, x_score_df, y_score_df, x_label, y_label, statistic_column='cohens_d', p_value_column='cohens_d_p', statistic_name='d', use_non_text_features=False, pick_color=pick_color, axis_scaler=scale_neg_1_to_1_with_zero_mean, distance_measure=EuclideanDistance, semiotic_square_labels=None, x_tooltip_label=None, y_tooltip_label=None, **kwargs):
'\n\n :param corpus: Corpus\n :param x_score_df: pd.DataFrame, contains effect_size_column, p_value_column. outputted by CohensD\n :param y_score_df: pd.DataFrame, contains effect_size_column, p_value_column. outputted by CohensD\n :param x_label: str\n :param y_label: str\n :param statistic_column: str, column in x_score_df, y_score_df giving statistics, default cohens_d\n :param p_value_column: str, column in x_score_df, y_score_df giving effect sizes, default cohens_d_p\n :param statistic_name: str, column which corresponds to statistic name, defauld d\n :param use_non_text_features: bool, default True\n :param pick_color: func, returns color, default is pick_color\n :param axis_scaler: func, scaler default is scale_neg_1_to_1_with_zero_mean\n :param distance_measure: DistanceMeasureBase, default EuclideanDistance\n This is how parts of the square are populated\n :param semiotic_square_labels: dict, semiotic square position labels\n :param x_tooltip_label: str, if None, x_label\n :param y_tooltip_label: str, if None, y_label\n :param kwargs: dict, other arguments\n :return: str, html\n '
if use_non_text_features:
terms = corpus.get_metadata()
else:
terms = corpus.get_terms()
axes = pd.DataFrame({'x': x_score_df[statistic_column], 'y': y_score_df[statistic_column]}).loc[terms]
merged_scores = pd.merge(x_score_df, y_score_df, left_index=True, right_index=True).loc[terms]
x_tooltip_label = (x_label if (x_tooltip_label is None) else x_tooltip_label)
y_tooltip_label = (y_label if (y_tooltip_label is None) else y_tooltip_label)
def generate_term_metadata(term_struct):
if ((p_value_column + '_corr_x') in term_struct):
x_p = term_struct[(p_value_column + '_corr_x')]
elif ((p_value_column + '_x') in term_struct):
x_p = term_struct[(p_value_column + '_x')]
else:
x_p = None
if ((p_value_column + '_corr_y') in term_struct):
y_p = term_struct[(p_value_column + '_corr_y')]
elif ((p_value_column + '_y') in term_struct):
y_p = term_struct[(p_value_column + '_y')]
else:
y_p = None
if (x_p is not None):
x_p = min(x_p, (1.0 - x_p))
if (y_p is not None):
y_p = min(y_p, (1.0 - y_p))
x_d = term_struct[(statistic_column + '_x')]
y_d = term_struct[(statistic_column + '_y')]
tooltip = ('%s: %s: %0.3f' % (x_tooltip_label, statistic_name, x_d))
if (x_p is not None):
tooltip += ('; p: %0.4f' % x_p)
tooltip += '<br/>'
tooltip += ('%s: %s: %0.3f' % (y_tooltip_label, statistic_name, y_d))
if (y_p is not None):
tooltip += ('; p: %0.4f' % y_p)
return {'tooltip': tooltip, 'color': pick_color(x_p, y_p, np.abs(x_d), np.abs(y_d))}
explanations = merged_scores.apply(generate_term_metadata, axis=1)
semiotic_square = SemioticSquareFromAxes(corpus, axes, x_axis_name=x_label, y_axis_name=y_label, labels=semiotic_square_labels, distance_measure=distance_measure)
get_tooltip_content = kwargs.get('get_tooltip_content', '(function(d) {return d.term + "<br/> " + d.etc.tooltip})')
color_func = kwargs.get('color_func', '(function(d) {return d.etc.color})')
html = produce_scattertext_explorer(corpus, category=corpus.get_categories()[0], sort_by_dist=False, x_coords=axis_scaler(axes['x']), y_coords=axis_scaler(axes['y']), original_x=axes['x'], original_y=axes['y'], show_characteristic=False, show_top_terms=False, show_category_headings=True, x_label=x_label, y_label=y_label, semiotic_square=semiotic_square, get_tooltip_content=get_tooltip_content, x_axis_values=None, y_axis_values=None, unified_context=True, color_func=color_func, show_axes=False, term_metadata=explanations.to_dict(), use_non_text_features=use_non_text_features, **kwargs)
return html<|docstring|>:param corpus: Corpus
:param x_score_df: pd.DataFrame, contains statistic_column and p_value_column, as output by CohensD
:param y_score_df: pd.DataFrame, contains statistic_column and p_value_column, as output by CohensD
:param x_label: str
:param y_label: str
:param statistic_column: str, column in x_score_df, y_score_df giving statistics, default cohens_d
:param p_value_column: str, column in x_score_df, y_score_df giving p-values, default cohens_d_p
:param statistic_name: str, column which corresponds to statistic name, default d
:param use_non_text_features: bool, default False
:param pick_color: func, returns color, default is pick_color
:param axis_scaler: func, scaler default is scale_neg_1_to_1_with_zero_mean
:param distance_measure: DistanceMeasureBase, default EuclideanDistance
This is how parts of the square are populated
:param semiotic_square_labels: dict, semiotic square position labels
:param x_tooltip_label: str, if None, x_label
:param y_tooltip_label: str, if None, y_label
:param kwargs: dict, other arguments
:return: str, html<|endoftext|> |
845538801cacea268aeec1f856af913e8c1ccc646a22a1cf0ad3b2bd613b81b3 | def produce_scattertext_digraph(df, text_col, source_col, dest_col, source_name='Source', dest_name='Destination', graph_width=500, graph_height=500, metadata_func=None, enable_pan_and_zoom=True, engine='dot', graph_params=None, node_params=None, **kwargs):
'\n\n    :param df: pd.DataFrame\n    :param text_col: str\n    :param source_col: str\n    :param dest_col: str\n    :param source_name: str\n    :param dest_name: str\n    :param graph_width: int\n    :param graph_height: int\n    :param metadata_func: lambda\n    :param enable_pan_and_zoom: bool\n    :param engine: str, The graphviz engine (e.g., dot or neato)\n    :param graph_params: dict or None, graph parameters in graphviz\n    :param node_params: dict or None, node parameters in graphviz\n    :param kwargs: dict\n    :return:\n    '
graph_df = pd.concat([df.assign(__text=(lambda df: df[source_col]), __alttext=(lambda df: df[text_col]), __category='source'), df.assign(__text=(lambda df: df[dest_col]), __alttext=(lambda df: df[text_col]), __category='target')])
corpus = CorpusFromParsedDocuments(graph_df, category_col='__category', parsed_col='__text', feats_from_spacy_doc=UseFullDocAsMetadata()).build()
edges = corpus.get_df()[[source_col, dest_col]].rename(columns={source_col: 'source', dest_col: 'target'}).drop_duplicates()
component_graph = SimpleDiGraph(edges).make_component_digraph(graph_params=graph_params, node_params=node_params)
graph_renderer = ComponentDiGraphHTMLRenderer(component_graph, height=graph_height, width=graph_width, enable_pan_and_zoom=enable_pan_and_zoom, engine=engine)
alternative_term_func = '(function(termDict) {\n document.querySelectorAll(".dotgraph").forEach(svg => svg.style.display = \'none\');\n showTermGraph(termDict[\'term\']);\n return true;\n })'
scatterplot_structure = produce_scattertext_explorer(corpus, category='source', category_name=source_name, not_category_name=dest_name, minimum_term_frequency=0, pmi_threshold_coefficient=0, alternative_text_field='__alttext', use_non_text_features=True, transform=dense_rank, metadata=(corpus.get_df().apply(metadata_func, axis=1) if metadata_func else None), return_scatterplot_structure=True, width_in_pixels=kwargs.get('width_in_pixels', 700), max_overlapping=kwargs.get('max_overlapping', 3), color_func=kwargs.get('color_func', '(function(x) {return "#5555FF"})'), alternative_term_func=alternative_term_func, **kwargs)
html = GraphStructure(scatterplot_structure, graph_renderer=graph_renderer).to_html()
return html | :param df: pd.DataFrame
:param text_col: str
:param source_col: str
:param dest_col: str
:param source_name: str
:param dest_name: str
:param graph_width: int
:param graph_height: int
:param metadata_func: lambda
:param enable_pan_and_zoom: bool
:param engine: str, The graphviz engine (e.g., dot or neato)
:param graph_params: dict or None, graph parameters in graphviz
:param node_params: dict or None, node parameters in graphviz
:param kwargs: dict
:return: | scattertext/__init__.py | produce_scattertext_digraph | JasonKessler/scattertext | 1,823 | python | def produce_scattertext_digraph(df, text_col, source_col, dest_col, source_name='Source', dest_name='Destination', graph_width=500, graph_height=500, metadata_func=None, enable_pan_and_zoom=True, engine='dot', graph_params=None, node_params=None, **kwargs):
'\n\n    :param df: pd.DataFrame\n    :param text_col: str\n    :param source_col: str\n    :param dest_col: str\n    :param source_name: str\n    :param dest_name: str\n    :param graph_width: int\n    :param graph_height: int\n    :param metadata_func: lambda\n    :param enable_pan_and_zoom: bool\n    :param engine: str, The graphviz engine (e.g., dot or neato)\n    :param graph_params: dict or None, graph parameters in graphviz\n    :param node_params: dict or None, node parameters in graphviz\n    :param kwargs: dict\n    :return:\n    '
graph_df = pd.concat([df.assign(__text=(lambda df: df[source_col]), __alttext=(lambda df: df[text_col]), __category='source'), df.assign(__text=(lambda df: df[dest_col]), __alttext=(lambda df: df[text_col]), __category='target')])
corpus = CorpusFromParsedDocuments(graph_df, category_col='__category', parsed_col='__text', feats_from_spacy_doc=UseFullDocAsMetadata()).build()
edges = corpus.get_df()[[source_col, dest_col]].rename(columns={source_col: 'source', dest_col: 'target'}).drop_duplicates()
component_graph = SimpleDiGraph(edges).make_component_digraph(graph_params=graph_params, node_params=node_params)
graph_renderer = ComponentDiGraphHTMLRenderer(component_graph, height=graph_height, width=graph_width, enable_pan_and_zoom=enable_pan_and_zoom, engine=engine)
alternative_term_func = '(function(termDict) {\n document.querySelectorAll(".dotgraph").forEach(svg => svg.style.display = \'none\');\n showTermGraph(termDict[\'term\']);\n return true;\n })'
scatterplot_structure = produce_scattertext_explorer(corpus, category='source', category_name=source_name, not_category_name=dest_name, minimum_term_frequency=0, pmi_threshold_coefficient=0, alternative_text_field='__alttext', use_non_text_features=True, transform=dense_rank, metadata=(corpus.get_df().apply(metadata_func, axis=1) if metadata_func else None), return_scatterplot_structure=True, width_in_pixels=kwargs.get('width_in_pixels', 700), max_overlapping=kwargs.get('max_overlapping', 3), color_func=kwargs.get('color_func', '(function(x) {return "#5555FF"})'), alternative_term_func=alternative_term_func, **kwargs)
html = GraphStructure(scatterplot_structure, graph_renderer=graph_renderer).to_html()
return html | def produce_scattertext_digraph(df, text_col, source_col, dest_col, source_name='Source', dest_name='Destination', graph_width=500, graph_height=500, metadata_func=None, enable_pan_and_zoom=True, engine='dot', graph_params=None, node_params=None, **kwargs):
'\n\n    :param df: pd.DataFrame\n    :param text_col: str\n    :param source_col: str\n    :param dest_col: str\n    :param source_name: str\n    :param dest_name: str\n    :param graph_width: int\n    :param graph_height: int\n    :param metadata_func: lambda\n    :param enable_pan_and_zoom: bool\n    :param engine: str, The graphviz engine (e.g., dot or neato)\n    :param graph_params: dict or None, graph parameters in graphviz\n    :param node_params: dict or None, node parameters in graphviz\n    :param kwargs: dict\n    :return:\n    '
graph_df = pd.concat([df.assign(__text=(lambda df: df[source_col]), __alttext=(lambda df: df[text_col]), __category='source'), df.assign(__text=(lambda df: df[dest_col]), __alttext=(lambda df: df[text_col]), __category='target')])
corpus = CorpusFromParsedDocuments(graph_df, category_col='__category', parsed_col='__text', feats_from_spacy_doc=UseFullDocAsMetadata()).build()
edges = corpus.get_df()[[source_col, dest_col]].rename(columns={source_col: 'source', dest_col: 'target'}).drop_duplicates()
component_graph = SimpleDiGraph(edges).make_component_digraph(graph_params=graph_params, node_params=node_params)
graph_renderer = ComponentDiGraphHTMLRenderer(component_graph, height=graph_height, width=graph_width, enable_pan_and_zoom=enable_pan_and_zoom, engine=engine)
alternative_term_func = '(function(termDict) {\n document.querySelectorAll(".dotgraph").forEach(svg => svg.style.display = \'none\');\n showTermGraph(termDict[\'term\']);\n return true;\n })'
scatterplot_structure = produce_scattertext_explorer(corpus, category='source', category_name=source_name, not_category_name=dest_name, minimum_term_frequency=0, pmi_threshold_coefficient=0, alternative_text_field='__alttext', use_non_text_features=True, transform=dense_rank, metadata=(corpus.get_df().apply(metadata_func, axis=1) if metadata_func else None), return_scatterplot_structure=True, width_in_pixels=kwargs.get('width_in_pixels', 700), max_overlapping=kwargs.get('max_overlapping', 3), color_func=kwargs.get('color_func', '(function(x) {return "#5555FF"})'), alternative_term_func=alternative_term_func, **kwargs)
html = GraphStructure(scatterplot_structure, graph_renderer=graph_renderer).to_html()
return html<|docstring|>:param df: pd.DataFrame
:param text_col: str
:param source_col: str
:param dest_col: str
:param source_name: str
:param dest_name: str
:param graph_width: int
:param graph_height: int
:param metadata_func: lambda
:param enable_pan_and_zoom: bool
:param engine: str, The graphviz engine (e.g., dot or neato)
:param graph_params: dict or None, graph parameters in graphviz
:param node_params: dict or None, node parameters in graphviz
:param kwargs: dict
:return:<|endoftext|> |
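A usage sketch for produce_scattertext_digraph, inferred only from the signature and docstring above; the DataFrame contents and the output filename are invented for illustration:

import pandas as pd
import scattertext as st

# Invented reply network: one row per message, with its sender and recipient.
email_df = pd.DataFrame({
    'text': ['re: budget', 'budget approved', 'lunch tomorrow?'],
    'sender': ['ann', 'bob', 'ann'],
    'recipient': ['bob', 'ann', 'cara'],
})
html = st.produce_scattertext_digraph(
    email_df, text_col='text', source_col='sender', dest_col='recipient',
    source_name='Sender', dest_name='Recipient')
with open('digraph.html', 'w') as fh:
    fh.write(html)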
6ca7136dc4d4596b06a55dded04f5a111e1720c3eafc260e0a1e0b790056f966 | def produce_scattertext_table(corpus, num_rows=10, use_non_text_features=False, plot_width=500, plot_height=700, category_order=None, **kwargs):
'\n\n    :param corpus: Corpus\n    :param num_rows: int, number of rows to show in each category table\n    :param use_non_text_features: bool, default False\n    :param plot_width: int\n    :param plot_height: int\n    :param category_order: list or None, names of categories to show in order\n    :param kwargs: dict\n    :return: str\n    '
alternative_term_func = '(function(termDict) {\n //document.querySelectorAll(".dotgraph").forEach(svg => svg.style.display = \'none\');\n //showTermGraph(termDict[\'term\']);\n //alert(termDict[\'term\'])\n return true;\n })'
graph_renderer = CategoryTableMaker(corpus=corpus, num_rows=num_rows, use_metadata=use_non_text_features, category_order=category_order)
dispersion = Dispersion(corpus, use_categories=True, use_metadata=use_non_text_features)
adjusted_dispersion = dispersion.get_adjusted_metric(dispersion.da(), dispersion.get_frequency())
plot_df = pd.DataFrame().assign(X=dispersion.get_frequency(), Frequency=(lambda df: df.X), Xpos=(lambda df: Scalers.dense_rank(df.X)), Y=(lambda df: adjusted_dispersion), AdjustedDA=(lambda df: df.Y), Ypos=(lambda df: Scalers.scale_neg_1_to_1_with_zero_mean(df.Y)), ColorScore=(lambda df: Scalers.scale_neg_1_to_1_with_zero_mean(df.Y)), term=dispersion.get_names()).set_index('term')
line_df = pd.DataFrame({'x': plot_df.Xpos.values, 'y': 0.5}).sort_values(by='x')
kwargs.setdefault('top_terms_left_buffer', 10)
scatterplot_structure = dataframe_scattertext(corpus, plot_df=plot_df, ignore_categories=False, unified_context=kwargs.get('unified_context', True), x_label='Frequency Rank', y_label='Frequency-adjusted DA', y_axis_labels=['More Concentrated', 'Medium', 'More Dispersion'], color_score_column='ColorScore', tooltip_columns=['Frequency', 'AdjustedDA'], header_names={'upper': 'Dispersed', 'lower': 'Concentrated'}, left_list_column='AdjustedDA', line_coordinates=line_df.to_dict('records'), use_non_text_features=use_non_text_features, return_scatterplot_structure=True, width_in_pixels=plot_width, height_in_pixels=plot_height, **kwargs)
html = TableStructure(scatterplot_structure, graph_renderer=graph_renderer).to_html()
return html | :param corpus: Corpus
:param num_rows: int, number of rows to show in each category table
:param use_non_text_features: bool, default False
:param plot_width: int
:param plot_height: int
:param category_order: list or None, names of categories to show in order
:param kwargs: dict
:return: str | scattertext/__init__.py | produce_scattertext_table | JasonKessler/scattertext | 1,823 | python | def produce_scattertext_table(corpus, num_rows=10, use_non_text_features=False, plot_width=500, plot_height=700, category_order=None, **kwargs):
'\n\n    :param corpus: Corpus\n    :param num_rows: int, number of rows to show in each category table\n    :param use_non_text_features: bool, default False\n    :param plot_width: int\n    :param plot_height: int\n    :param category_order: list or None, names of categories to show in order\n    :param kwargs: dict\n    :return: str\n    '
alternative_term_func = '(function(termDict) {\n //document.querySelectorAll(".dotgraph").forEach(svg => svg.style.display = \'none\');\n //showTermGraph(termDict[\'term\']);\n //alert(termDict[\'term\'])\n return true;\n })'
graph_renderer = CategoryTableMaker(corpus=corpus, num_rows=num_rows, use_metadata=use_non_text_features, category_order=category_order)
dispersion = Dispersion(corpus, use_categories=True, use_metadata=use_non_text_features)
adjusted_dispersion = dispersion.get_adjusted_metric(dispersion.da(), dispersion.get_frequency())
plot_df = pd.DataFrame().assign(X=dispersion.get_frequency(), Frequency=(lambda df: df.X), Xpos=(lambda df: Scalers.dense_rank(df.X)), Y=(lambda df: adjusted_dispersion), AdjustedDA=(lambda df: df.Y), Ypos=(lambda df: Scalers.scale_neg_1_to_1_with_zero_mean(df.Y)), ColorScore=(lambda df: Scalers.scale_neg_1_to_1_with_zero_mean(df.Y)), term=dispersion.get_names()).set_index('term')
line_df = pd.DataFrame({'x': plot_df.Xpos.values, 'y': 0.5}).sort_values(by='x')
kwargs.setdefault('top_terms_left_buffer', 10)
scatterplot_structure = dataframe_scattertext(corpus, plot_df=plot_df, ignore_categories=False, unified_context=kwargs.get('unified_context', True), x_label='Frequency Rank', y_label='Frequency-adjusted DA', y_axis_labels=['More Concentrated', 'Medium', 'More Dispersion'], color_score_column='ColorScore', tooltip_columns=['Frequency', 'AdjustedDA'], header_names={'upper': 'Dispersed', 'lower': 'Concentrated'}, left_list_column='AdjustedDA', line_coordinates=line_df.to_dict('records'), use_non_text_features=use_non_text_features, return_scatterplot_structure=True, width_in_pixels=plot_width, height_in_pixels=plot_height, **kwargs)
html = TableStructure(scatterplot_structure, graph_renderer=graph_renderer).to_html()
return html | def produce_scattertext_table(corpus, num_rows=10, use_non_text_features=False, plot_width=500, plot_height=700, category_order=None, **kwargs):
'\n\n    :param corpus: Corpus\n    :param num_rows: int, number of rows to show in each category table\n    :param use_non_text_features: bool, default False\n    :param plot_width: int\n    :param plot_height: int\n    :param category_order: list or None, names of categories to show in order\n    :param kwargs: dict\n    :return: str\n    '
alternative_term_func = '(function(termDict) {\n //document.querySelectorAll(".dotgraph").forEach(svg => svg.style.display = \'none\');\n //showTermGraph(termDict[\'term\']);\n //alert(termDict[\'term\'])\n return true;\n })'
graph_renderer = CategoryTableMaker(corpus=corpus, num_rows=num_rows, use_metadata=use_non_text_features, category_order=category_order)
dispersion = Dispersion(corpus, use_categories=True, use_metadata=use_non_text_features)
adjusted_dispersion = dispersion.get_adjusted_metric(dispersion.da(), dispersion.get_frequency())
plot_df = pd.DataFrame().assign(X=dispersion.get_frequency(), Frequency=(lambda df: df.X), Xpos=(lambda df: Scalers.dense_rank(df.X)), Y=(lambda df: adjusted_dispersion), AdjustedDA=(lambda df: df.Y), Ypos=(lambda df: Scalers.scale_neg_1_to_1_with_zero_mean(df.Y)), ColorScore=(lambda df: Scalers.scale_neg_1_to_1_with_zero_mean(df.Y)), term=dispersion.get_names()).set_index('term')
line_df = pd.DataFrame({'x': plot_df.Xpos.values, 'y': 0.5}).sort_values(by='x')
kwargs.setdefault('top_terms_left_buffer', 10)
scatterplot_structure = dataframe_scattertext(corpus, plot_df=plot_df, ignore_categories=False, unified_context=kwargs.get('unified_context', True), x_label='Frequency Rank', y_label='Frequency-adjusted DA', y_axis_labels=['More Concentrated', 'Medium', 'More Dispersion'], color_score_column='ColorScore', tooltip_columns=['Frequency', 'AdjustedDA'], header_names={'upper': 'Dispersed', 'lower': 'Concentrated'}, left_list_column='AdjustedDA', line_coordinates=line_df.to_dict('records'), use_non_text_features=use_non_text_features, return_scatterplot_structure=True, width_in_pixels=plot_width, height_in_pixels=plot_height, **kwargs)
html = TableStructure(scatterplot_structure, graph_renderer=graph_renderer).to_html()
return html<|docstring|>:param corpus: Corpus
:param num_rows: int, number of rows to show in each category table
:param use_non_text_features: bool, default False
:param plot_width: int
:param plot_height: int
:param category_order: list or None, names of categories to show in order
:param kwargs: dict
:return: str<|endoftext|>
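A usage sketch for produce_scattertext_table; it assumes a Scattertext Corpus named corpus has already been built elsewhere (e.g. with CorpusFromParsedDocuments(...).build()):

import scattertext as st

# `corpus` is a placeholder for a Corpus built elsewhere in the usual way.
html = st.produce_scattertext_table(corpus, num_rows=5,
                                    plot_width=600, plot_height=700)
with open('dispersion_table.html', 'w') as fh:
    fh.write(html)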
6fa6cf4900bed0d3449062b30e52b009c8fa015a0821fc9977a62aa904c5c436 | @subcommand()
def cmd_init(args):
'Initialize the papers directory (first use only).'
with Papers(setup=True) as p:
print('Initialized {} as Papers directory'.format(p.base_dir)) | Initialize the papers directory (first use only). | papers.py | cmd_init | FilippoBiga/Papers | 1 | python | @subcommand()
def cmd_init(args):
with Papers(setup=True) as p:
print('Initialized {} as Papers directory'.format(p.base_dir)) | @subcommand()
def cmd_init(args):
with Papers(setup=True) as p:
print('Initialized {} as Papers directory'.format(p.base_dir))<|docstring|>Initialize the papers directory (first use only).<|endoftext|> |
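The subcommand and arg helpers used by the papers.py commands below are defined outside this excerpt. A minimal sketch of the standard argparse-registry pattern such helpers usually follow (every name here is an assumption, not taken from the source):

import argparse

cli = argparse.ArgumentParser(description='Papers CLI')
subparsers = cli.add_subparsers(dest='subcommand')

def arg(*names, **kwargs):
    # Bundle arguments for a later parser.add_argument() call.
    return (names, kwargs)

def subcommand(parser_args=()):
    def decorator(func):
        name = func.__name__.replace('cmd_', '')
        parser = subparsers.add_parser(name, description=func.__doc__)
        for names, kwargs in parser_args:
            parser.add_argument(*names, **kwargs)
        parser.set_defaults(func=func)
        return func
    return decorator

if __name__ == '__main__':
    ns = cli.parse_args()
    ns.func(ns)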
7ca7e810de2443da5c1fe414b324ab33ccd728163b8be0a4b818bb577400c5a4 | @subcommand([arg('-f', '--file', required=True, help='The file you want to import.'), arg('-t', '--title', required=True, help='The title of the paper being imported.'), arg('-k', '--keywords', help='Comma-separated list of keywords')])
def cmd_import(args):
'Import a new paper.'
keywords = []
if (args.keywords is not None):
keywords = args.keywords.split(',')
with Papers() as p:
p.add(args.file, args.title, keywords=keywords)
print("Imported '{}'".format(args.title)) | Import a new paper. | papers.py | cmd_import | FilippoBiga/Papers | 1 | python | @subcommand([arg('-f', '--file', required=True, help='The file you want to import.'), arg('-t', '--title', required=True, help='The title of the paper being imported.'), arg('-k', '--keywords', help='Comma-separated list of keywords')])
def cmd_import(args):
keywords = []
if (args.keywords is not None):
keywords = args.keywords.split(',')
with Papers() as p:
p.add(args.file, args.title, keywords=keywords)
print("Imported '{}'".format(args.title)) | @subcommand([arg('-f', '--file', required=True, help='The file you want to import.'), arg('-t', '--title', required=True, help='The title of the paper being imported.'), arg('-k', '--keywords', help='Comma-separated list of keywords')])
def cmd_import(args):
keywords = []
if (args.keywords is not None):
keywords = args.keywords.split(',')
with Papers() as p:
p.add(args.file, args.title, keywords=keywords)
print("Imported '{}'".format(args.title))<|docstring|>Import a new paper.<|endoftext|> |
9823d4024369fefa210518fd64db581f2979aa4a14dca443e7b5f688a0d24e68 | @subcommand([arg('-p', '--paper_id', required=True, help='The identifier of the paper to delete.')])
def cmd_delete(args):
'Delete a paper and all the data related to it.'
with Papers() as p:
p.delete(args.paper_id)
print('Removed {}'.format(args.paper_id)) | Delete a paper and all the data related to it. | papers.py | cmd_delete | FilippoBiga/Papers | 1 | python | @subcommand([arg('-p', '--paper_id', required=True, help='The identifier of the paper to delete.')])
def cmd_delete(args):
with Papers() as p:
p.delete(args.paper_id)
print('Removed {}'.format(args.paper_id)) | @subcommand([arg('-p', '--paper_id', required=True, help='The identifier of the paper to delete.')])
def cmd_delete(args):
with Papers() as p:
p.delete(args.paper_id)
print('Removed {}'.format(args.paper_id))<|docstring|>Delete a paper and all the data related to it.<|endoftext|> |
7c9ba77aad7686e622559fbd36d00901b17fe42a57089c34e44e4aea48098f9d | @subcommand([arg('-s', '--show-status', required=False, action='store_true', help='Show status of each paper.'), arg('-d', '--show-date', required=False, action='store_true', help='Show date of each paper.')])
def cmd_list(args):
'List papers.'
with Papers() as p:
for paper in p.list():
print(format_entry(paper, status=args.show_status, date=args.show_date)) | List papers. | papers.py | cmd_list | FilippoBiga/Papers | 1 | python | @subcommand([arg('-s', '--show-status', required=False, action='store_true', help='Show status of each paper.'), arg('-d', '--show-date', required=False, action='store_true', help='Show date of each paper.')])
def cmd_list(args):
with Papers() as p:
for paper in p.list():
print(format_entry(paper, status=args.show_status, date=args.show_date)) | @subcommand([arg('-s', '--show-status', required=False, action='store_true', help='Show status of each paper.'), arg('-d', '--show-date', required=False, action='store_true', help='Show date of each paper.')])
def cmd_list(args):
with Papers() as p:
for paper in p.list():
print(format_entry(paper, status=args.show_status, date=args.show_date))<|docstring|>List papers.<|endoftext|> |
e3867a5eca910b5b68ef0884b38f9957b274c116ee2976be0e1fa9a546981287 | @subcommand([arg('-s', '--show-status', required=False, action='store_true', help='Show status of the paper.'), arg('-d', '--show-date', required=False, action='store_true', help='Show date of the paper.')])
def cmd_last(args):
'Retrieve the last added paper.'
with Papers() as p:
print(format_entry(p.last(), status=args.show_status, date=args.show_date)) | Retrieve the last added paper. | papers.py | cmd_last | FilippoBiga/Papers | 1 | python | @subcommand([arg('-s', '--show-status', required=False, action='store_true', help='Show status of the paper.'), arg('-d', '--show-date', required=False, action='store_true', help='Show date of the paper.')])
def cmd_last(args):
with Papers() as p:
print(format_entry(p.last(), status=args.show_status, date=args.show_date)) | @subcommand([arg('-s', '--show-status', required=False, action='store_true', help='Show status of the paper.'), arg('-d', '--show-date', required=False, action='store_true', help='Show date of the paper.')])
def cmd_last(args):
with Papers() as p:
print(format_entry(p.last(), status=args.show_status, date=args.show_date))<|docstring|>Retrieve the last added paper.<|endoftext|> |
68a930d78b747689aa919c6c9ffc5562ef223a4ce8c4c7e6293630fc9695915c | @subcommand([arg('-s', '--status', required=True, choices=['unread', 'wip', 'skimmed', 'read'], help='Read status of the paper.'), arg('-p', '--paper_id', required=True, help='The identifier of the paper to update.')])
def cmd_mark(args):
'Set the status of a paper.'
with Papers() as p:
p.mark(args.status, args.paper_id)
print('Marked {} as {}'.format(args.paper_id, args.status)) | Set the status of a paper. | papers.py | cmd_mark | FilippoBiga/Papers | 1 | python | @subcommand([arg('-s', '--status', required=True, choices=['unread', 'wip', 'skimmed', 'read'], help='Read status of the paper.'), arg('-p', '--paper_id', required=True, help='The identifier of the paper to update.')])
def cmd_mark(args):
with Papers() as p:
p.mark(args.status, args.paper_id)
print('Marked {} as {}'.format(args.paper_id, args.status)) | @subcommand([arg('-s', '--status', required=True, choices=['unread', 'wip', 'skimmed', 'read'], help='Read status of the paper.'), arg('-p', '--paper_id', required=True, help='The identifier of the paper to update.')])
def cmd_mark(args):
with Papers() as p:
p.mark(args.status, args.paper_id)
print('Marked {} as {}'.format(args.paper_id, args.status))<|docstring|>Set the status of a paper.<|endoftext|> |
8a7b30f303e2c8ee248ba983be3bfb65ae2e84a110fa1f748d445e9341625a36 | @subcommand([arg('-a', '--add', help='Associate a keyword to a paper.'), arg('-r', '--remove', help='Remove a keyword from a paper.'), arg('-l', '--list', action='store_true', help='List all the keywords associated to a paper.'), arg('-p', '--paper_id', required=True, help='The identifier of the paper to update.')])
def cmd_word(args):
'Manage keywords associated with a paper.'
def check_opts(x):
assert x, Color.fail('Only one action can be specified')
with Papers() as p:
if args.list:
check_opts(((args.add is None) and (args.remove is None)))
(entry, keywords) = p.retrieve(args.paper_id, keywords=True)
print(format_title_keywords(entry.title, keywords))
elif (args.add is not None):
check_opts((args.remove is None))
p.tag(args.add, args.paper_id)
elif (args.remove is not None):
p.untag(args.remove, args.paper_id) | Manage keywords associated with a paper. | papers.py | cmd_word | FilippoBiga/Papers | 1 | python | @subcommand([arg('-a', '--add', help='Associate a keyword to a paper.'), arg('-r', '--remove', help='Remove a keyword from a paper.'), arg('-l', '--list', action='store_true', help='List all the keywords associated to a paper.'), arg('-p', '--paper_id', required=True, help='The identifier of the paper to update.')])
def cmd_word(args):
def check_opts(x):
assert x, Color.fail('Only one action can be specified')
with Papers() as p:
if args.list:
check_opts(((args.add is None) and (args.remove is None)))
(entry, keywords) = p.retrieve(args.paper_id, keywords=True)
print(format_title_keywords(entry.title, keywords))
elif (args.add is not None):
check_opts((args.remove is None))
p.tag(args.add, args.paper_id)
elif (args.remove is not None):
p.untag(args.remove, args.paper_id) | @subcommand([arg('-a', '--add', help='Associate a keyword to a paper.'), arg('-r', '--remove', help='Remove a keyword from a paper.'), arg('-l', '--list', action='store_true', help='List all the keywords associated to a paper.'), arg('-p', '--paper_id', required=True, help='The identifier of the paper to update.')])
def cmd_word(args):
def check_opts(x):
assert x, Color.fail('Only one action can be specified')
with Papers() as p:
if args.list:
check_opts(((args.add is None) and (args.remove is None)))
(entry, keywords) = p.retrieve(args.paper_id, keywords=True)
print(format_title_keywords(entry.title, keywords))
elif (args.add is not None):
check_opts((args.remove is None))
p.tag(args.add, args.paper_id)
elif (args.remove is not None):
p.untag(args.remove, args.paper_id)<|docstring|>Manage keywords associated with a paper.<|endoftext|> |
2a147d3f007379b7871667de976eff299978fb2627ca837bdc937247a02b4ca8 | @subcommand([arg('-k', '--keyword', help='Search on keywords.'), arg('-t', '--title', help='Search on paper titles')])
def cmd_search(args):
'Search through keywords and titles'
with Papers() as p:
for (paper, keywords) in p.filter(args.title, args.keyword):
title = paper.title
if (args.keyword is not None):
keywords = map((lambda x: Color.highlight_matches(x, args.keyword)), keywords)
if (args.title is not None):
title = Color.highlight_matches(title, args.title)
print((format_title_keywords(title, keywords) + '\n')) | Search through keywords and titles | papers.py | cmd_search | FilippoBiga/Papers | 1 | python | @subcommand([arg('-k', '--keyword', help='Search on keywords.'), arg('-t', '--title', help='Search on paper titles')])
def cmd_search(args):
with Papers() as p:
for (paper, keywords) in p.filter(args.title, args.keyword):
title = paper.title
if (args.keyword is not None):
keywords = map((lambda x: Color.highlight_matches(x, args.keyword)), keywords)
if (args.title is not None):
title = Color.highlight_matches(title, args.title)
print((format_title_keywords(title, keywords) + '\n')) | @subcommand([arg('-k', '--keyword', help='Search on keywords.'), arg('-t', '--title', help='Search on paper titles')])
def cmd_search(args):
with Papers() as p:
for (paper, keywords) in p.filter(args.title, args.keyword):
title = paper.title
if (args.keyword is not None):
keywords = map((lambda x: Color.highlight_matches(x, args.keyword)), keywords)
if (args.title is not None):
title = Color.highlight_matches(title, args.title)
print((format_title_keywords(title, keywords) + '\n'))<|docstring|>Search through keywords and titles<|endoftext|> |
031133afb11cf3a78b00daf40d7b2e1c2cc9f2b091e704fced2f9e3f9ad07c95 | @subcommand([arg('-p', '--paper_id', required=True, help='The identifier of the paper to open.')])
def cmd_open(args):
'Open the directory containing the given paper.'
with Papers() as p:
p.open(args.paper_id) | Open the directory containing the given paper. | papers.py | cmd_open | FilippoBiga/Papers | 1 | python | @subcommand([arg('-p', '--paper_id', required=True, help='The identifier of the paper to open.')])
def cmd_open(args):
with Papers() as p:
p.open(args.paper_id) | @subcommand([arg('-p', '--paper_id', required=True, help='The identifier of the paper to open.')])
def cmd_open(args):
with Papers() as p:
p.open(args.paper_id)<|docstring|>Open the directory containing the given paper.<|endoftext|> |
d5cd94d2dd61c3df8a05af713485cc927a3ac01d1c538dc61983958b18e3e9b2 | @staticmethod
def wrap(s, c):
' Wrap s with color c and the terminator '
return '{}{}{}'.format(c, s, Color._ENDC) | Wrap s with color c and the terminator | papers.py | wrap | FilippoBiga/Papers | 1 | python | @staticmethod
def wrap(s, c):
' '
return '{}{}{}'.format(c, s, Color._ENDC) | @staticmethod
def wrap(s, c):
' '
return '{}{}{}'.format(c, s, Color._ENDC)<|docstring|>Wrap s with color c and the terminator<|endoftext|> |
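For illustration, Color.wrap brackets a string with an ANSI escape and the class's reset terminator; a standalone equivalent, with the escape values assumed since Color's constants are outside this excerpt:

GREEN = '\033[92m'  # assumed escape code
ENDC = '\033[0m'    # assumed reset code
print('{}{}{}'.format(GREEN, 'done', ENDC))  # prints 'done' in green on ANSI terminals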
73e18e850bd617377698b94cbcd58c705378fbbeffe60d3b9e6eed4b7194ea4e | @staticmethod
def highlight_matches(s, match):
' Highlight all the occurrences of match in s with the MATCHING color '
pattern = re.compile(match, re.IGNORECASE)
# Replace right-to-left so earlier match offsets stay valid after insertion.
for m in reversed(list(re.finditer(pattern, s))):
s = ((s[0:m.start()] + Color.matching(s[m.start():m.end()])) + s[m.end():])
return s | Highlight all the occurrences of match in s with the MATCHING color | papers.py | highlight_matches | FilippoBiga/Papers | 1 | python | @staticmethod
def highlight_matches(s, match):
' '
pattern = re.compile(match, re.IGNORECASE)
# Replace right-to-left so earlier match offsets stay valid after insertion.
for m in reversed(list(re.finditer(pattern, s))):
s = ((s[0:m.start()] + Color.matching(s[m.start():m.end()])) + s[m.end():])
return s | @staticmethod
def highlight_matches(s, match):
' '
pattern = re.compile(match, re.IGNORECASE)
# Replace right-to-left so earlier match offsets stay valid after insertion.
for m in reversed(list(re.finditer(pattern, s))):
s = ((s[0:m.start()] + Color.matching(s[m.start():m.end()])) + s[m.end():])
return s<|docstring|>Highlight all the occurrences of match in s with the MATCHING color<|endoftext|>
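A short demonstration of why the matches are applied right-to-left above: inserting color codes lengthens the string, so offsets from matches computed on the original string would go stale if replacements ran left-to-right. Hypothetical markers stand in for Color.matching:

import re

s = 'paper on papers'
matches = list(re.finditer(re.compile('pap', re.IGNORECASE), s))
# matches[1].start() == 9 indexes the ORIGINAL string; wrapping the first
# match first would shift everything after it and invalidate that offset.
for m in reversed(matches):
    s = s[:m.start()] + '<<' + s[m.start():m.end()] + '>>' + s[m.end():]
print(s)  # <<pap>>er on <<pap>>ers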
fe18d402a829f24a498a4715907946f5bc04edc718fdc030fbf05e7d3c87d6f0 | def translate_last(method):
"\n\t\tConvert 'last' to the pid of the last added paper.\n\t\tDecorator to be applied to every method that takes a paper id (as a last argument)\n\t\t"
def wrapped(instance, *args):
pid_arg = args[(- 1)]
arg_list = list(args)
arg_list[(- 1)] = (instance.last_paper()[0] if (pid_arg == 'last') else pid_arg)
return method(instance, *tuple(arg_list))
return wrapped | Convert 'last' to the pid of the last added paper.
Decorator to be applied to every method that takes a paper id (as a last argument) | papers.py | translate_last | FilippoBiga/Papers | 1 | python | def translate_last(method):
"\n\t\tConvert 'last' to the pid of the last added paper.\n\t\tDecorator to be applied to every method that takes a paper id (as a last argument)\n\t\t"
def wrapped(instance, *args):
pid_arg = args[(- 1)]
arg_list = list(args)
arg_list[(- 1)] = (instance.last_paper()[0] if (pid_arg == 'last') else pid_arg)
return method(instance, *tuple(arg_list))
return wrapped | def translate_last(method):
"\n\t\tConvert 'last' to the pid of the last added paper.\n\t\tDecorator to be applied to every method that takes a paper id (as a last argument)\n\t\t"
def wrapped(instance, *args):
pid_arg = args[(- 1)]
arg_list = list(args)
arg_list[(- 1)] = (instance.last_paper()[0] if (pid_arg == 'last') else pid_arg)
return method(instance, *tuple(arg_list))
return wrapped<|docstring|>Convert 'last' to the pid of the last added paper.
Decorator to be applied to every method that takes a paper id (as a last argument)<|endoftext|> |
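An invented miniature of the same decorator idea, showing the sentinel argument being rewritten before the wrapped method runs:

def translate_last_demo(method):
    # Swap a trailing 'last' sentinel for the real index before dispatch.
    def wrapped(self, *args):
        args = list(args)
        if args[-1] == 'last':
            args[-1] = self.last_index()
        return method(self, *args)
    return wrapped

class Shelf:
    def __init__(self):
        self.items = ['a', 'b', 'c']

    def last_index(self):
        return len(self.items) - 1

    @translate_last_demo
    def get(self, i):
        return self.items[i]

print(Shelf().get('last'))  # -> 'c'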
f2ee45e42943c8e4230a5fb401292587fc1bab3da9084ea1b07dfaf43c705b70 | def last_paper(self):
' Retrieve the last added paper '
try:
self.cur.execute('\n\t\t\t\tSELECT * FROM papers\n\t\t\t\tORDER BY date_added DESC\n\t\t\t')
return Database.Entry(*self.cur.fetchone())
except sqlite3.Error as e:
self._err('Error retrieving last paper', e) | Retrieve the last added paper | papers.py | last_paper | FilippoBiga/Papers | 1 | python | def last_paper(self):
' '
try:
self.cur.execute('\n\t\t\t\tSELECT * FROM papers\n\t\t\t\tORDER BY date_added DESC\n\t\t\t')
return Database.Entry(*self.cur.fetchone())
except sqlite3.Error as e:
self._err('Error retrieving last paper', e) | def last_paper(self):
' '
try:
self.cur.execute('\n\t\t\t\tSELECT * FROM papers\n\t\t\t\tORDER BY date_added DESC\n\t\t\t')
return Database.Entry(*self.cur.fetchone())
except sqlite3.Error as e:
self._err('Error retrieving last paper', e)<|docstring|>Retrieve the last added paper<|endoftext|> |
1a9ce711d8d0971015c585875298596784ddb264754737f61268a9adcd5ce540 | @translate_last
def get_keywords(self, pid):
' Retrieve all the keywords associated to a certain paper '
try:
self.cur.execute('\n\t\t\t\tSELECT word FROM keywords\n\t\t\t\tWHERE pid = ?\n\t\t\t', (pid,))
return list(map((lambda x: x[0]), self.cur.fetchall()))
except sqlite3.Error as e:
self._err('Error retrieving keywords', e) | Retrieve all the keywords associated to a certain paper | papers.py | get_keywords | FilippoBiga/Papers | 1 | python | @translate_last
def get_keywords(self, pid):
' '
try:
self.cur.execute('\n\t\t\t\tSELECT word FROM keywords\n\t\t\t\tWHERE pid = ?\n\t\t\t', (pid,))
return list(map((lambda x: x[0]), self.cur.fetchall()))
except sqlite3.Error as e:
self._err('Error retrieving keywords', e) | @translate_last
def get_keywords(self, pid):
' '
try:
self.cur.execute('\n\t\t\t\tSELECT word FROM keywords\n\t\t\t\tWHERE pid = ?\n\t\t\t', (pid,))
return list(map((lambda x: x[0]), self.cur.fetchall()))
except sqlite3.Error as e:
self._err('Error retrieving keywords', e)<|docstring|>Retrieve all the keywords associated to a certain paper<|endoftext|> |
0cd3498f353fd275baad8b06519790213fca9e0e06cc4422e3e1c9a951075052 | def insert(self, title, relpath, keywords):
' Insert a paper entry into the database (and possibly the keywords) '
try:
self.cur.execute('\n\t\t\t\tINSERT INTO papers(title, relpath)\n\t\t\t\tVALUES(?,?)', (title, relpath))
pid = self.last_paper().id
for kword in keywords:
self.add_keyword(kword, pid)
self.conn.commit()
except sqlite3.Error as e:
self._err('Error inserting paper', e) | Insert a paper entry into the database (and possibly the keywords) | papers.py | insert | FilippoBiga/Papers | 1 | python | def insert(self, title, relpath, keywords):
' '
try:
self.cur.execute('\n\t\t\t\tINSERT INTO papers(title, relpath)\n\t\t\t\tVALUES(?,?)', (title, relpath))
pid = self.last_paper().id
for kword in keywords:
self.add_keyword(kword, pid)
self.conn.commit()
except sqlite3.Error as e:
self._err('Error inserting paper', e) | def insert(self, title, relpath, keywords):
' '
try:
self.cur.execute('\n\t\t\t\tINSERT INTO papers(title, relpath)\n\t\t\t\tVALUES(?,?)', (title, relpath))
pid = self.last_paper().id
for kword in keywords:
self.add_keyword(kword, pid)
self.conn.commit()
except sqlite3.Error as e:
self._err('Error inserting paper', e)<|docstring|>Insert a paper entry into the database (and possibly the keywords)<|endoftext|> |
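The table definitions are not part of this excerpt; a schema consistent with the queries in insert(), last_paper(), and get_keywords() might look like the following (column types, defaults, and constraints are assumptions):

import sqlite3

conn = sqlite3.connect('papers.db')
conn.executescript('''
    CREATE TABLE IF NOT EXISTS papers (
        id         INTEGER PRIMARY KEY AUTOINCREMENT,
        title      TEXT NOT NULL,
        relpath    TEXT NOT NULL,
        status     INTEGER DEFAULT 0,
        date_added TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE TABLE IF NOT EXISTS keywords (
        pid  INTEGER NOT NULL REFERENCES papers(id),
        word TEXT NOT NULL
    );
''')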
1afdfef9f165ae684c82aff93a9183addf6b8458f3c61e1657877b4d8abea4fe | @translate_last
def remove(self, pid):
' Remove a paper from the DB '
try:
found = self.find_paper(pid)
relpath = found.relpath
self.cur.execute('DELETE FROM papers WHERE id = ?', (pid,))
self.conn.commit()
return relpath
except sqlite3.Error as e:
self._err('Error deleting paper', e) | Remove a paper from the DB | papers.py | remove | FilippoBiga/Papers | 1 | python | @translate_last
def remove(self, pid):
' '
try:
found = self.find_paper(pid)
relpath = found.relpath
self.cur.execute('DELETE FROM papers WHERE id = ?', (pid,))
self.conn.commit()
return relpath
except sqlite3.Error as e:
self._err('Error deleting paper', e) | @translate_last
def remove(self, pid):
' '
try:
found = self.find_paper(pid)
relpath = found.relpath
self.cur.execute('DELETE FROM papers WHERE id = ?', (pid,))
self.conn.commit()
return relpath
except sqlite3.Error as e:
self._err('Error deleting paper', e)<|docstring|>Remove a paper from the DB<|endoftext|> |
14fc74a81b4112b7cb90370279f242458ea2c7fa69b07d725495373f4514e054 | def search(self, title=None, keyword=None):
'\n\t\tSearch the papers, expose an iterator.\n\t\tNote that if both title and keyword are None, all the papers will match.\n\t\t(This is indeed how Papers.list() is implemented)\n\t\t'
def _match(etitle, ekwds):
keyword_match = False
title_match = False
if (keyword is not None):
low_keyword = keyword.lower()
keyword_match = any(map((lambda x: (x.lower().find(low_keyword) != (- 1))), stored_keywords))
if (title is not None):
title_match = (title.lower() in entry.title.lower())
return (keyword_match or title_match)
try:
self.cur.execute('\n\t\t\t\tSELECT * FROM papers\n\t\t\t\tORDER BY date_added DESC\n\t\t\t')
entries = map((lambda x: Database.Entry(*x)), self.cur.fetchall())
match_all = ((title is None) and (keyword is None))
for entry in entries:
stored_keywords = self.get_keywords(entry.id)
did_match = (match_all or _match(entry.title, stored_keywords))
if did_match:
(yield (entry, stored_keywords))
except sqlite3.Error as e:
self._err('Error retrieving paper list', e) | Search the papers, expose an iterator.
Note that if both title and keyword are None, all the papers will match.
(This is indeed how Papers.list() is implemented) | papers.py | search | FilippoBiga/Papers | 1 | python | def search(self, title=None, keyword=None):
'\n\t\tSearch the papers, expose an iterator.\n\t\tNote that if both title and keyword are None, all the papers will match.\n\t\t(This is indeed how Papers.list() is implemented)\n\t\t'
def _match(etitle, ekwds):
keyword_match = False
title_match = False
if (keyword is not None):
low_keyword = keyword.lower()
keyword_match = any(map((lambda x: (x.lower().find(low_keyword) != (- 1))), stored_keywords))
if (title is not None):
title_match = (title.lower() in entry.title.lower())
return (keyword_match or title_match)
try:
self.cur.execute('\n\t\t\t\tSELECT * FROM papers\n\t\t\t\tORDER BY date_added DESC\n\t\t\t')
entries = map((lambda x: Database.Entry(*x)), self.cur.fetchall())
match_all = ((title is None) and (keyword is None))
for entry in entries:
stored_keywords = self.get_keywords(entry.id)
did_match = (match_all or _match(entry.title, stored_keywords))
if did_match:
(yield (entry, stored_keywords))
except sqlite3.Error as e:
self._err('Error retrieving paper list', e) | def search(self, title=None, keyword=None):
'\n\t\tSearch the papers, expose an iterator.\n\t\tNote that if both title and keyword are None, all the papers will match.\n\t\t(This is indeed how Papers.list() is implemented)\n\t\t'
def _match(etitle, ekwds):
keyword_match = False
title_match = False
if (keyword is not None):
low_keyword = keyword.lower()
keyword_match = any(map((lambda x: (x.lower().find(low_keyword) != (- 1))), stored_keywords))
if (title is not None):
title_match = (title.lower() in entry.title.lower())
return (keyword_match or title_match)
try:
self.cur.execute('\n\t\t\t\tSELECT * FROM papers\n\t\t\t\tORDER BY date_added DESC\n\t\t\t')
entries = map((lambda x: Database.Entry(*x)), self.cur.fetchall())
match_all = ((title is None) and (keyword is None))
for entry in entries:
stored_keywords = self.get_keywords(entry.id)
did_match = (match_all or _match(entry.title, stored_keywords))
if did_match:
(yield (entry, stored_keywords))
except sqlite3.Error as e:
self._err('Error retrieving paper list', e)<|docstring|>Search the papers, expose an iterator.
Note that if both title and keyword are None, all the papers will match.
(This is indeed how Papers.list() is implemented)<|endoftext|> |
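A hypothetical search session consuming the generator above through the public filter() wrapper defined further down; 'sql' is matched case-insensitively against each paper's stored keywords:

with Papers() as p:
    for entry, kwds in p.filter(keyword='sql'):
        print(entry.id, entry.title, kwds)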
e5288d986fbb604dc02ede2890e5c2a867229f7a744e808be913eefaac7b8dfd | @translate_last
def update_status(self, status, pid):
' Update the reading status of a paper '
try:
code = Status(status).code
self.cur.execute('\n\t\t\t\tUPDATE papers\n\t\t\t\tSET status = ?\n\t\t\t\tWHERE id = ?\n\t\t\t', (code, pid))
self.conn.commit()
except sqlite3.Error as e:
self._err('Error updating paper status', e) | Update the reading status of a paper | papers.py | update_status | FilippoBiga/Papers | 1 | python | @translate_last
def update_status(self, status, pid):
' '
try:
code = Status(status).code
self.cur.execute('\n\t\t\t\tUPDATE papers\n\t\t\t\tSET status = ?\n\t\t\t\tWHERE id = ?\n\t\t\t', (code, pid))
self.conn.commit()
except sqlite3.Error as e:
self._err('Error updating paper status', e) | @translate_last
def update_status(self, status, pid):
' '
try:
code = Status(status).code
self.cur.execute('\n\t\t\t\tUPDATE papers\n\t\t\t\tSET status = ?\n\t\t\t\tWHERE id = ?\n\t\t\t', (code, pid))
self.conn.commit()
except sqlite3.Error as e:
self._err('Error updating paper status', e)<|docstring|>Update the reading status of a paper<|endoftext|> |
21ce652fa8f253bc6294599cff8037d36b0fa97d280a5dc3ed74037027bd7e10 | def paper_subdir(self, ntitle):
' Subdirectory of a paper given the normalized title '
return os.path.join(self.directory, ntitle) | Subdirectory of a paper given the normalized title | papers.py | paper_subdir | FilippoBiga/Papers | 1 | python | def paper_subdir(self, ntitle):
' '
return os.path.join(self.directory, ntitle) | def paper_subdir(self, ntitle):
' '
return os.path.join(self.directory, ntitle)<|docstring|>Subdirectory of a paper given the normalized title<|endoftext|> |
f422fb6caa4ffbc8bfa835f47451b840e59e0d25b0f1e27fe0af6ab7f915703b | def add(self, file, title):
' Import a paper (create subdir, copy file, create notes.txt) '
normalized_title = title.replace(' ', '_').lower()
paper_dir = self.paper_subdir(normalized_title)
assert (not os.path.exists(paper_dir)), Color.fail('{} already exists'.format(paper_dir))
os.makedirs(paper_dir)
shutil.copy2(file, paper_dir)
open(os.path.join(paper_dir, 'notes.txt'), 'a').close()
return os.path.relpath(paper_dir, self.directory) | Import a paper (create subdir, copy file, create notes.txt) | papers.py | add | FilippoBiga/Papers | 1 | python | def add(self, file, title):
' '
normalized_title = title.replace(' ', '_').lower()
paper_dir = self.paper_subdir(normalized_title)
assert (not os.path.exists(paper_dir)), Color.fail('{} already exists'.format(paper_dir))
os.makedirs(paper_dir)
shutil.copy2(file, paper_dir)
open(os.path.join(paper_dir, 'notes.txt'), 'a').close()
return os.path.relpath(paper_dir, self.directory) | def add(self, file, title):
' '
normalized_title = title.replace(' ', '_').lower()
paper_dir = self.paper_subdir(normalized_title)
assert (not os.path.exists(paper_dir)), Color.fail('{} already exists'.format(paper_dir))
os.makedirs(paper_dir)
shutil.copy2(file, paper_dir)
open(os.path.join(paper_dir, 'notes.txt'), 'a').close()
return os.path.relpath(paper_dir, self.directory)<|docstring|>Import a paper (create subdir, copy file, create notes.txt)<|endoftext|> |
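For illustration, the normalization above maps spaces to underscores and lowercases, so the subdirectory name is fully determined by the title:

title = 'Attention Is All You Need'
print(title.replace(' ', '_').lower())  # attention_is_all_you_need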
b1436ebb4ddc18c9d52b36f154fe34d712c5487338d31222c80813947f125f2d | def add(self, file, title, keywords):
' Add a paper '
assert os.path.exists(file), Color.fail('{} does not exist'.format(file))
relpath = self.storage.add(file, title)
self.db.insert(title, relpath, keywords) | Add a paper | papers.py | add | FilippoBiga/Papers | 1 | python | def add(self, file, title, keywords):
' '
assert os.path.exists(file), Color.fail('{} does not exist'.format(file))
relpath = self.storage.add(file, title)
self.db.insert(title, relpath, keywords) | def add(self, file, title, keywords):
' '
assert os.path.exists(file), Color.fail('{} does not exist'.format(file))
relpath = self.storage.add(file, title)
self.db.insert(title, relpath, keywords)<|docstring|>Add a paper<|endoftext|> |
167cf3f22ae5d79e02ed62eebb246aa68ded4b15e14dec046929b6b95fde2fe8 | def delete(self, pid):
' Delete a paper '
relpath = self.db.remove(pid)
self.storage.delete(relpath) | Delete a paper | papers.py | delete | FilippoBiga/Papers | 1 | python | def delete(self, pid):
' '
relpath = self.db.remove(pid)
self.storage.delete(relpath) | def delete(self, pid):
' '
relpath = self.db.remove(pid)
self.storage.delete(relpath)<|docstring|>Delete a paper<|endoftext|> |
5127ac9d1466a8d7bbf9bb6a4b0b7dc93c5e1e38276d104bb7e0615b2d703c9f | def list(self):
' List all the papers '
result = []
for (entry, keywords) in self.db.search(title=None, keyword=None):
result.append(entry)
return result | List all the papers | papers.py | list | FilippoBiga/Papers | 1 | python | def list(self):
' '
result = []
for (entry, keywords) in self.db.search(title=None, keyword=None):
result.append(entry)
return result | def list(self):
' '
result = []
for (entry, keywords) in self.db.search(title=None, keyword=None):
result.append(entry)
return result<|docstring|>List all the papers<|endoftext|> |
be194935a6338b85a60ff3d8378deeb499204238f54693079901ee4fa9d957a6 | def filter(self, title=None, keyword=None):
' Filter the papers based on title and keyword '
assert ((title is not None) or (keyword is not None)), Color.fail('Either title or keyword should not be empty')
return self.db.search(title=title, keyword=keyword) | Filter the papers based on title and keyword | papers.py | filter | FilippoBiga/Papers | 1 | python | def filter(self, title=None, keyword=None):
' '
assert ((title is not None) or (keyword is not None)), Color.fail('Either title or keyword should not be empty')
return self.db.search(title=title, keyword=keyword) | def filter(self, title=None, keyword=None):
' '
assert ((title is not None) or (keyword is not None)), Color.fail('Either title or keyword should not be empty')
return self.db.search(title=title, keyword=keyword)<|docstring|>Filter the papers based on title and keyword<|endoftext|> |
b95a94568ecf08e6e150be96994c97da21d5b89a2851ec0178592d0c9aa9bbde | def mark(self, status, pid):
' Update reading status '
self.db.update_status(status, pid) | Update reading status | papers.py | mark | FilippoBiga/Papers | 1 | python | def mark(self, status, pid):
' '
self.db.update_status(status, pid) | def mark(self, status, pid):
' '
self.db.update_status(status, pid)<|docstring|>Update reading status<|endoftext|> |
70338f46440d448f81b39db8e9d1db2a833311fd1180aa7a5461b57faeb0fd9e | def retrieve(self, pid, keywords=False):
' Retrieve a paper with the given pid '
entry = self.db.find_paper(pid)
assert (entry is not None), Color.fail('Could not retrieve paper')
if keywords:
stored_keywords = self.db.get_keywords(pid)
return (entry, stored_keywords)
return (entry,) | Retrieve a paper with the given pid | papers.py | retrieve | FilippoBiga/Papers | 1 | python | def retrieve(self, pid, keywords=False):
' '
entry = self.db.find_paper(pid)
assert (entry is not None), Color.fail('Could not retrieve paper')
if keywords:
stored_keywords = self.db.get_keywords(pid)
return (entry, stored_keywords)
return (entry,) | def retrieve(self, pid, keywords=False):
' '
entry = self.db.find_paper(pid)
assert (entry is not None), Color.fail('Could not retrieve paper')
if keywords:
stored_keywords = self.db.get_keywords(pid)
return (entry, stored_keywords)
return (entry,)<|docstring|>Retrieve a paper with the given pid<|endoftext|> |
99451948d132a63c803cc170b4cac65fb0a32fd7061f4e80fe84ab6cc81044b9 | def tag(self, keyword, pid):
' Associate keyword to a paper '
self.db.add_keyword(keyword, pid) | Associate keyword to a paper | papers.py | tag | FilippoBiga/Papers | 1 | python | def tag(self, keyword, pid):
' '
self.db.add_keyword(keyword, pid) | def tag(self, keyword, pid):
' '
self.db.add_keyword(keyword, pid)<|docstring|>Associate keyword to a paper<|endoftext|> |
30325577e5a6d300f5b00fff511f23fd29315a3b58d0781c3c8ace3d25a5fc68 | def untag(self, keyword, pid):
' Remove keyword from a paper '
self.db.remove_keyword(keyword, pid) | Remove keyword from a paper | papers.py | untag | FilippoBiga/Papers | 1 | python | def untag(self, keyword, pid):
' '
self.db.remove_keyword(keyword, pid) | def untag(self, keyword, pid):
' '
self.db.remove_keyword(keyword, pid)<|docstring|>Remove keyword from a paper<|endoftext|> |
71d54027d9e353a75ef37355877ebd2d4dd067bef19c2ba97d905981596ed48e | def open(self, pid):
' Open the subfolder for a given paper '
entry = self.db.find_paper(pid)
full_path = self.storage.paper_subdir(entry.relpath)
os.system('open "{}"'.format(full_path)) | Open the subfolder for a given paper | papers.py | open | FilippoBiga/Papers | 1 | python | def open(self, pid):
' '
entry = self.db.find_paper(pid)
full_path = self.storage.paper_subdir(entry.relpath)
os.system('open "{}"'.format(full_path)) | def open(self, pid):
' '
entry = self.db.find_paper(pid)
full_path = self.storage.paper_subdir(entry.relpath)
os.system('open "{}"'.format(full_path))<|docstring|>Open the subfolder for a given paper<|endoftext|> |
a6d68e42205cde97132691a14d7bde5e04b55d87b2809400028ec9ce0b65851d | def load_pkgs(model: VetiverModel=None, packages: list=None, path=''):
'Load packages necessary for predictions\n\n Args\n ----\n model: VetiverModel\n VetiverModel to extract packages from\n packages: list\n List of extra packages to include\n path: str\n Where to save output file\n '
required_pkgs = ['vetiver']
if packages:
required_pkgs = list(set((required_pkgs + packages)))
if model.metadata.get('required_pkgs'):
required_pkgs = list(set((required_pkgs + model.metadata.get('required_pkgs'))))
tmp = tempfile.NamedTemporaryFile(suffix='.in')
with open(tmp.name, 'a') as f:
for package in required_pkgs:
f.write((package + '\n'))
os.system(f'pip-compile {f.name} --output-file={path}vetiver_requirements.txt') | Load packages necessary for predictions
Args
----
model: VetiverModel
VetiverModel to extract packages from
packages: list
List of extra packages to include
path: str
Where to save output file | vetiver/attach_pkgs.py | load_pkgs | isabelizimm/vetiver-python | 0 | python | def load_pkgs(model: VetiverModel=None, packages: list=None, path=):
'Load packages necessary for predictions\n\n Args\n ----\n model: VetiverModel\n VetiverModel to extract packages from\n packages: list\n List of extra packages to include\n path: str\n Where to save output file\n '
required_pkgs = ['vetiver']
if packages:
required_pkgs = list(set((required_pkgs + packages)))
if model.metadata.get('required_pkgs'):
required_pkgs = list(set((required_pkgs + model.metadata.get('required_pkgs'))))
tmp = tempfile.NamedTemporaryFile(suffix='.in')
with open(tmp.name, 'a') as f:
for package in required_pkgs:
f.write((package + '\n'))
os.system(f'pip-compile {f.name} --output-file={path}vetiver_requirements.txt') | def load_pkgs(model: VetiverModel=None, packages: list=None, path=):
'Load packages necessary for predictions\n\n Args\n ----\n model: VetiverModel\n VetiverModel to extract packages from\n packages: list\n List of extra packages to include\n path: str\n Where to save output file\n '
required_pkgs = ['vetiver']
if packages:
required_pkgs = list(set((required_pkgs + packages)))
if model.metadata.get('required_pkgs'):
required_pkgs = list(set((required_pkgs + model.metadata.get('required_pkgs'))))
tmp = tempfile.NamedTemporaryFile(suffix='.in')
with open(tmp.name, 'a') as f:
for package in required_pkgs:
f.write((package + '\n'))
os.system(f'pip-compile {f.name} --output-file={path}vetiver_requirements.txt')<|docstring|>Load packages necessary for predictions
Args
----
model: VetiverModel
VetiverModel to extract packages from
packages: list
List of extra packages to include
path: str
Where to save output file<|endoftext|> |
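A usage sketch; `v` stands in for a VetiverModel built elsewhere, the output path is invented, and pip-tools must be installed since load_pkgs shells out to pip-compile:

from vetiver.attach_pkgs import load_pkgs

load_pkgs(model=v, packages=['scikit-learn'], path='model_dir/')
# expected result: model_dir/vetiver_requirements.txt with pinned versions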
4f126c63dced00f20d91af92f0a93b1372236199c626380ce38bbd1547578b23 | def _layer(inputs, mode, layer_num, filters, kernel_size, dilation_rate, dropout_rate):
'Layer building block of MeshNet.\n\n    Performs 3D convolution, activation, batch normalization, and dropout on\n    `inputs` tensor.\n\n    Args:\n        inputs : float `Tensor`, input tensor.\n        mode : string, a TensorFlow mode key.\n        layer_num : int, value to append to each operator name. This should be\n            the layer number in the network.\n        filters : int, number of 3D convolution filters.\n        kernel_size : int or tuple, size of 3D convolution kernel.\n        dilation_rate : int or tuple, rate of dilation in 3D convolution.\n        dropout_rate : float, the dropout rate between 0 and 1.\n\n    Returns:\n        `Tensor` of same type as `inputs`.\n    '
training = (mode == tf.estimator.ModeKeys.TRAIN)
with tf.variable_scope('layer_{}'.format(layer_num)):
conv = tf.layers.conv3d(inputs, filters=filters, kernel_size=kernel_size, padding='SAME', dilation_rate=dilation_rate, activation=None)
activation = tf.nn.relu(conv)
bn = tf.layers.batch_normalization(activation, training=training, fused=FUSED_BATCH_NORM)
return tf.layers.dropout(bn, rate=dropout_rate, training=training) | Layer building block of MeshNet.
Performs 3D convolution, activation, batch normalization, and dropout on
`inputs` tensor.
Args:
inputs : float `Tensor`, input tensor.
mode : string, a TensorFlow mode key.
layer_num : int, value to append to each operator name. This should be
the layer number in the network.
filters : int, number of 3D convolution filters.
kernel_size : int or tuple, size of 3D convolution kernel.
dilation_rate : int or tuple, rate of dilation in 3D convolution.
dropout_rate : float, the dropout rate between 0 and 1.
Returns:
`Tensor` of same type as `inputs`. | nobrainer/models/meshnet.py | _layer | soichih/kwyk_neuronet | 3 | python | def _layer(inputs, mode, layer_num, filters, kernel_size, dilation_rate, dropout_rate):
'Layer building block of MeshNet.\n\n    Performs 3D convolution, activation, batch normalization, and dropout on\n    `inputs` tensor.\n\n    Args:\n        inputs : float `Tensor`, input tensor.\n        mode : string, a TensorFlow mode key.\n        layer_num : int, value to append to each operator name. This should be\n            the layer number in the network.\n        filters : int, number of 3D convolution filters.\n        kernel_size : int or tuple, size of 3D convolution kernel.\n        dilation_rate : int or tuple, rate of dilation in 3D convolution.\n        dropout_rate : float, the dropout rate between 0 and 1.\n\n    Returns:\n        `Tensor` of same type as `inputs`.\n    '
training = (mode == tf.estimator.ModeKeys.TRAIN)
with tf.variable_scope('layer_{}'.format(layer_num)):
conv = tf.layers.conv3d(inputs, filters=filters, kernel_size=kernel_size, padding='SAME', dilation_rate=dilation_rate, activation=None)
activation = tf.nn.relu(conv)
bn = tf.layers.batch_normalization(activation, training=training, fused=FUSED_BATCH_NORM)
return tf.layers.dropout(bn, rate=dropout_rate, training=training) | def _layer(inputs, mode, layer_num, filters, kernel_size, dilation_rate, dropout_rate):
'Layer building block of MeshNet.\n\n    Performs 3D convolution, activation, batch normalization, and dropout on\n    `inputs` tensor.\n\n    Args:\n        inputs : float `Tensor`, input tensor.\n        mode : string, a TensorFlow mode key.\n        layer_num : int, value to append to each operator name. This should be\n            the layer number in the network.\n        filters : int, number of 3D convolution filters.\n        kernel_size : int or tuple, size of 3D convolution kernel.\n        dilation_rate : int or tuple, rate of dilation in 3D convolution.\n        dropout_rate : float, the dropout rate between 0 and 1.\n\n    Returns:\n        `Tensor` of same type as `inputs`.\n    '
training = (mode == tf.estimator.ModeKeys.TRAIN)
with tf.variable_scope('layer_{}'.format(layer_num)):
conv = tf.layers.conv3d(inputs, filters=filters, kernel_size=kernel_size, padding='SAME', dilation_rate=dilation_rate, activation=None)
activation = tf.nn.relu(conv)
bn = tf.layers.batch_normalization(activation, training=training, fused=FUSED_BATCH_NORM)
return tf.layers.dropout(bn, rate=dropout_rate, training=training)<|docstring|>Layer building block of MeshNet.
Performs 3D convolution, activation, batch normalization, and dropout on
`inputs` tensor.
Args:
inputs : float `Tensor`, input tensor.
mode : string, a TensorFlow mode key.
layer_num : int, value to append to each operator name. This should be
the layer number in the network.
filters : int, number of 3D convolution filters.
kernel_size : int or tuple, size of 3D convolution kernel.
dilation_rate : int or tuple, rate of dilation in 3D convolution.
dropout_rate : float, the dropout rate between 0 and 1.
Returns:
`Tensor` of same type as `inputs`.<|endoftext|> |
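Stacking the `_layer` block above with growing dilation rates is what gives MeshNet its large receptive field; a minimal pure-Python sketch of that growth (dilation rates copied from the `model_fn` record that follows; no TensorFlow needed):

kernel_size = 3
receptive_field = 1
for rate in (1, 1, 1, 2, 4, 8, 1):
    # each 3x3x3 convolution with dilation `rate` adds (kernel_size - 1) * rate voxels
    receptive_field += (kernel_size - 1) * rate
print(receptive_field)  # 37 voxels along each axis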
1b56b5ccc87ea0811ce044355c4f526030dc0b993cbe518f8e3f83b26d260640 | def model_fn(features, labels, mode, params, config=None):
'MeshNet model function.\n\n Args:\n features: 5D float `Tensor`, input tensor. This is the first item\n returned from the `input_fn` passed to `train`, `evaluate`, and\n `predict`. Use `NDHWC` format.\n labels: 4D float `Tensor`, labels tensor. This is the second item\n returned from the `input_fn` passed to `train`, `evaluate`, and\n `predict`. Labels should not be one-hot encoded.\n mode: Optional. Specifies if this training, evaluation or prediction.\n params: `dict` of parameters.\n - n_classes: (required) number of classes to classify.\n - optimizer: instance of TensorFlow optimizer. Required if\n training.\n - n_filters: number of filters to use in each convolution. The\n original implementation used 21 filters to classify brainmask\n and 71 filters for the multi-class problem.\n - dropout_rate: rate of dropout. For example, 0.1 would drop 10% of\n input units.\n config: configuration object.\n\n Returns:\n `tf.estimator.EstimatorSpec`\n\n Raises:\n `ValueError` if required parameters are not in `params`.\n '
volume = features
if isinstance(volume, dict):
volume = features['volume']
required_keys = {'n_classes'}
default_params = {'optimizer': None, 'n_filters': 21, 'dropout_rate': 0.25}
check_required_params(params=params, required_keys=required_keys)
set_default_params(params=params, defaults=default_params)
check_optimizer_for_training(optimizer=params['optimizer'], mode=mode)
tf.logging.debug('Parameters for model:')
tf.logging.debug(params)
dilation_rates = ((1, 1, 1), (1, 1, 1), (1, 1, 1), (2, 2, 2), (4, 4, 4), (8, 8, 8), (1, 1, 1))
outputs = volume
for (ii, dilation_rate) in enumerate(dilation_rates):
outputs = _layer(outputs, mode=mode, layer_num=(ii + 1), filters=params['n_filters'], kernel_size=3, dilation_rate=dilation_rate, dropout_rate=params['dropout_rate'])
with tf.variable_scope('logits'):
logits = tf.layers.conv3d(inputs=outputs, filters=params['n_classes'], kernel_size=(1, 1, 1), padding='SAME', activation=None)
predicted_classes = tf.argmax(logits, axis=(- 1))
if (mode == tf.estimator.ModeKeys.PREDICT):
predictions = {'class_ids': predicted_classes, 'probabilities': tf.nn.softmax(logits), 'logits': logits}
export_outputs = {'outputs': tf.estimator.export.PredictOutput(predictions)}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)
labels = tf.cast(labels, predicted_classes.dtype)
labels_onehot = tf.one_hot(labels, params['n_classes'])
predictions_onehot = tf.one_hot(predicted_classes, params['n_classes'])
eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels, predicted_classes), 'dice': streaming_dice(labels_onehot[(..., 1)], predictions_onehot[(..., 1)], axis=(1, 2, 3)), 'hamming': streaming_hamming(labels_onehot[(..., 1)], predictions_onehot[(..., 1)], axis=(1, 2, 3))}
if (mode == tf.estimator.ModeKeys.EVAL):
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
assert (mode == tf.estimator.ModeKeys.TRAIN)
global_step = tf.train.get_global_step()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = params['optimizer'].minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) | MeshNet model function.
Args:
features: 5D float `Tensor`, input tensor. This is the first item
returned from the `input_fn` passed to `train`, `evaluate`, and
`predict`. Use `NDHWC` format.
labels: 4D float `Tensor`, labels tensor. This is the second item
returned from the `input_fn` passed to `train`, `evaluate`, and
`predict`. Labels should not be one-hot encoded.
mode: Optional. Specifies if this is training, evaluation or prediction.
params: `dict` of parameters.
- n_classes: (required) number of classes to classify.
- optimizer: instance of TensorFlow optimizer. Required if
training.
- n_filters: number of filters to use in each convolution. The
original implementation used 21 filters to classify brainmask
and 71 filters for the multi-class problem.
- dropout_rate: rate of dropout. For example, 0.1 would drop 10% of
input units.
config: configuration object.
Returns:
`tf.estimator.EstimatorSpec`
Raises:
`ValueError` if required parameters are not in `params`. | nobrainer/models/meshnet.py | model_fn | soichih/kwyk_neuronet | 3 | python | def model_fn(features, labels, mode, params, config=None):
'MeshNet model function.\n\n Args:\n features: 5D float `Tensor`, input tensor. This is the first item\n returned from the `input_fn` passed to `train`, `evaluate`, and\n `predict`. Use `NDHWC` format.\n labels: 4D float `Tensor`, labels tensor. This is the second item\n returned from the `input_fn` passed to `train`, `evaluate`, and\n `predict`. Labels should not be one-hot encoded.\n mode: Optional. Specifies if this training, evaluation or prediction.\n params: `dict` of parameters.\n - n_classes: (required) number of classes to classify.\n - optimizer: instance of TensorFlow optimizer. Required if\n training.\n - n_filters: number of filters to use in each convolution. The\n original implementation used 21 filters to classify brainmask\n and 71 filters for the multi-class problem.\n - dropout_rate: rate of dropout. For example, 0.1 would drop 10% of\n input units.\n config: configuration object.\n\n Returns:\n `tf.estimator.EstimatorSpec`\n\n Raises:\n `ValueError` if required parameters are not in `params`.\n '
volume = features
if isinstance(volume, dict):
volume = features['volume']
required_keys = {'n_classes'}
default_params = {'optimizer': None, 'n_filters': 21, 'dropout_rate': 0.25}
check_required_params(params=params, required_keys=required_keys)
set_default_params(params=params, defaults=default_params)
check_optimizer_for_training(optimizer=params['optimizer'], mode=mode)
tf.logging.debug('Parameters for model:')
tf.logging.debug(params)
dilation_rates = ((1, 1, 1), (1, 1, 1), (1, 1, 1), (2, 2, 2), (4, 4, 4), (8, 8, 8), (1, 1, 1))
outputs = volume
for (ii, dilation_rate) in enumerate(dilation_rates):
outputs = _layer(outputs, mode=mode, layer_num=(ii + 1), filters=params['n_filters'], kernel_size=3, dilation_rate=dilation_rate, dropout_rate=params['dropout_rate'])
with tf.variable_scope('logits'):
logits = tf.layers.conv3d(inputs=outputs, filters=params['n_classes'], kernel_size=(1, 1, 1), padding='SAME', activation=None)
predicted_classes = tf.argmax(logits, axis=(- 1))
if (mode == tf.estimator.ModeKeys.PREDICT):
predictions = {'class_ids': predicted_classes, 'probabilities': tf.nn.softmax(logits), 'logits': logits}
export_outputs = {'outputs': tf.estimator.export.PredictOutput(predictions)}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)
labels = tf.cast(labels, predicted_classes.dtype)
labels_onehot = tf.one_hot(labels, params['n_classes'])
predictions_onehot = tf.one_hot(predicted_classes, params['n_classes'])
eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels, predicted_classes), 'dice': streaming_dice(labels_onehot[(..., 1)], predictions_onehot[(..., 1)], axis=(1, 2, 3)), 'hamming': streaming_hamming(labels_onehot[(..., 1)], predictions_onehot[(..., 1)], axis=(1, 2, 3))}
if (mode == tf.estimator.ModeKeys.EVAL):
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
assert (mode == tf.estimator.ModeKeys.TRAIN)
global_step = tf.train.get_global_step()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = params['optimizer'].minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) | def model_fn(features, labels, mode, params, config=None):
'MeshNet model function.\n\n Args:\n features: 5D float `Tensor`, input tensor. This is the first item\n returned from the `input_fn` passed to `train`, `evaluate`, and\n `predict`. Use `NDHWC` format.\n labels: 4D float `Tensor`, labels tensor. This is the second item\n returned from the `input_fn` passed to `train`, `evaluate`, and\n `predict`. Labels should not be one-hot encoded.\n mode: Optional. Specifies if this training, evaluation or prediction.\n params: `dict` of parameters.\n - n_classes: (required) number of classes to classify.\n - optimizer: instance of TensorFlow optimizer. Required if\n training.\n - n_filters: number of filters to use in each convolution. The\n original implementation used 21 filters to classify brainmask\n and 71 filters for the multi-class problem.\n - dropout_rate: rate of dropout. For example, 0.1 would drop 10% of\n input units.\n config: configuration object.\n\n Returns:\n `tf.estimator.EstimatorSpec`\n\n Raises:\n `ValueError` if required parameters are not in `params`.\n '
volume = features
if isinstance(volume, dict):
volume = features['volume']
required_keys = {'n_classes'}
default_params = {'optimizer': None, 'n_filters': 21, 'dropout_rate': 0.25}
check_required_params(params=params, required_keys=required_keys)
set_default_params(params=params, defaults=default_params)
check_optimizer_for_training(optimizer=params['optimizer'], mode=mode)
tf.logging.debug('Parameters for model:')
tf.logging.debug(params)
dilation_rates = ((1, 1, 1), (1, 1, 1), (1, 1, 1), (2, 2, 2), (4, 4, 4), (8, 8, 8), (1, 1, 1))
outputs = volume
for (ii, dilation_rate) in enumerate(dilation_rates):
outputs = _layer(outputs, mode=mode, layer_num=(ii + 1), filters=params['n_filters'], kernel_size=3, dilation_rate=dilation_rate, dropout_rate=params['dropout_rate'])
with tf.variable_scope('logits'):
logits = tf.layers.conv3d(inputs=outputs, filters=params['n_classes'], kernel_size=(1, 1, 1), padding='SAME', activation=None)
predicted_classes = tf.argmax(logits, axis=(- 1))
if (mode == tf.estimator.ModeKeys.PREDICT):
predictions = {'class_ids': predicted_classes, 'probabilities': tf.nn.softmax(logits), 'logits': logits}
export_outputs = {'outputs': tf.estimator.export.PredictOutput(predictions)}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)
labels = tf.cast(labels, predicted_classes.dtype)
labels_onehot = tf.one_hot(labels, params['n_classes'])
predictions_onehot = tf.one_hot(predicted_classes, params['n_classes'])
eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels, predicted_classes), 'dice': streaming_dice(labels_onehot[(..., 1)], predictions_onehot[(..., 1)], axis=(1, 2, 3)), 'hamming': streaming_hamming(labels_onehot[(..., 1)], predictions_onehot[(..., 1)], axis=(1, 2, 3))}
if (mode == tf.estimator.ModeKeys.EVAL):
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
assert (mode == tf.estimator.ModeKeys.TRAIN)
global_step = tf.train.get_global_step()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = params['optimizer'].minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)<|docstring|>MeshNet model function.
Args:
features: 5D float `Tensor`, input tensor. This is the first item
returned from the `input_fn` passed to `train`, `evaluate`, and
`predict`. Use `NDHWC` format.
labels: 4D float `Tensor`, labels tensor. This is the second item
returned from the `input_fn` passed to `train`, `evaluate`, and
`predict`. Labels should not be one-hot encoded.
mode: Optional. Specifies if this is training, evaluation or prediction.
params: `dict` of parameters.
- n_classes: (required) number of classes to classify.
- optimizer: instance of TensorFlow optimizer. Required if
training.
- n_filters: number of filters to use in each convolution. The
original implementation used 21 filters to classify brainmask
and 71 filters for the multi-class problem.
- dropout_rate: rate of dropout. For example, 0.1 would drop 10% of
input units.
config: configuration object.
Returns:
`tf.estimator.EstimatorSpec`
Raises:
`ValueError` if required parameters are not in `params`.<|endoftext|> |
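A minimal sketch of wiring the `model_fn` above into an estimator, assuming TensorFlow 1.x; the optimizer, learning rate, and `n_classes` below are illustrative values, not taken from the repository:

import tensorflow as tf

params = {
    'n_classes': 2,                             # required key
    'optimizer': tf.train.AdamOptimizer(1e-3),  # required when training
    'n_filters': 21,
    'dropout_rate': 0.25,
}
estimator = tf.estimator.Estimator(model_fn=model_fn, params=params)
# estimator.train(input_fn=...)  # input_fn must yield NDHWC float volumes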
be1f0c9e514eaaca98d5e234dd0eed6ac65e16463fe22756af836e59ff93a996 | def stem(self, text):
'Stem a text string to its common stem form.'
normalizedText = TextNormalizer.normalize_text(text)
words = normalizedText.split(' ')
stems = []
for word in words:
stems.append(self.stem_word(word))
return ' '.join(stems) | Stem a text string to its common stem form. | Stemmer/Stemmer.py | stem | edho08/Sastrawize | 0 | python | def stem(self, text):
normalizedText = TextNormalizer.normalize_text(text)
words = normalizedText.split(' ')
stems = []
for word in words:
stems.append(self.stem_word(word))
return ' '.join(stems) | def stem(self, text):
normalizedText = TextNormalizer.normalize_text(text)
words = normalizedText.split(' ')
stems = []
for word in words:
stems.append(self.stem_word(word))
return ' '.join(stems)<|docstring|>Stem a text string to its common stem form.<|endoftext|> |
b4d791c4bb3edd60d5b1df3e487594556039561344c1bb0a3ba07a84372335d5 | def stem_word(self, word):
'Stem a word to its common stem form.'
if self.is_plural(word):
return self.stem_plural_word(word)
else:
return self.stem_singular_word(word) | Stem a word to its common stem form. | Stemmer/Stemmer.py | stem_word | edho08/Sastrawize | 0 | python | def stem_word(self, word):
if self.is_plural(word):
return self.stem_plural_word(word)
else:
return self.stem_singular_word(word) | def stem_word(self, word):
if self.is_plural(word):
return self.stem_plural_word(word)
else:
return self.stem_singular_word(word)<|docstring|>Stem a word to its common stem form.<|endoftext|> |
630f2cf85563c9ab0bb18d4d558be57108a43e06bb237a598802da86ee10d0d6 | def stem_plural_word(self, plural):
'Stem a plural word to its common stem form.\n Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval" page 76-77.\n\n @link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf\n '
matches = re.match('^(.*)-(.*)$', plural)
if (not matches):
return plural
words = [matches.group(1), matches.group(2)]
suffix = words[1]
suffixes = ['ku', 'mu', 'nya', 'lah', 'kah', 'tah', 'pun']
matches = re.match('^(.*)-(.*)$', words[0])
if ((suffix in suffixes) and matches):
words[0] = matches.group(1)
words[1] = ((matches.group(2) + '-') + suffix)
rootWord1 = self.stem_singular_word(words[0])
rootWord2 = self.stem_singular_word(words[1])
if ((not self.dictionary.contains(words[1])) and (rootWord2 == words[1])):
rootWord2 = self.stem_singular_word(('me' + words[1]))
if (rootWord1 == rootWord2):
return rootWord1
else:
return plural | Stem a plural word to its common stem form.
Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval" page 76-77.
@link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf | Stemmer/Stemmer.py | stem_plural_word | edho08/Sastrawize | 0 | python | def stem_plural_word(self, plural):
'Stem a plural word to its common stem form.\n Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval" page 76-77.\n\n @link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf\n '
matches = re.match('^(.*)-(.*)$', plural)
if (not matches):
return plural
words = [matches.group(1), matches.group(2)]
suffix = words[1]
suffixes = ['ku', 'mu', 'nya', 'lah', 'kah', 'tah', 'pun']
matches = re.match('^(.*)-(.*)$', words[0])
if ((suffix in suffixes) and matches):
words[0] = matches.group(1)
words[1] = ((matches.group(2) + '-') + suffix)
rootWord1 = self.stem_singular_word(words[0])
rootWord2 = self.stem_singular_word(words[1])
if ((not self.dictionary.contains(words[1])) and (rootWord2 == words[1])):
rootWord2 = self.stem_singular_word(('me' + words[1]))
if (rootWord1 == rootWord2):
return rootWord1
else:
return plural | def stem_plural_word(self, plural):
'Stem a plural word to its common stem form.\n Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval" page 76-77.\n\n @link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf\n '
matches = re.match('^(.*)-(.*)$', plural)
if (not matches):
return plural
words = [matches.group(1), matches.group(2)]
suffix = words[1]
suffixes = ['ku', 'mu', 'nya', 'lah', 'kah', 'tah', 'pun']
matches = re.match('^(.*)-(.*)$', words[0])
if ((suffix in suffixes) and matches):
words[0] = matches.group(1)
words[1] = ((matches.group(2) + '-') + suffix)
rootWord1 = self.stem_singular_word(words[0])
rootWord2 = self.stem_singular_word(words[1])
if ((not self.dictionary.contains(words[1])) and (rootWord2 == words[1])):
rootWord2 = self.stem_singular_word(('me' + words[1]))
if (rootWord1 == rootWord2):
return rootWord1
else:
return plural<|docstring|>Stem a plural word to its common stem form.
Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval" page 76-77.
@link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf<|endoftext|> |
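A runnable trace of the hyphen-splitting rule in `stem_plural_word` above ('buku-buku' and 'buku-buku-nya' are illustrative Indonesian plurals):

import re

m = re.match('^(.*)-(.*)$', 'buku-buku')
print(m.group(1), m.group(2))  # buku buku -> both halves stem to 'buku'
# the greedy first group splits at the LAST hyphen, so a trailing
# particle such as '-nya' lands in group(2) and triggers the suffix
# re-attachment branch
m = re.match('^(.*)-(.*)$', 'buku-buku-nya')
print(m.group(1), m.group(2))  # buku-buku nya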
d6e53aefb5c477efee20bc0011e34397b45caa00ec271cf68211f105c96dad2a | def stem_singular_word(self, word):
'Stem a singular word to its common stem form.'
context = Context(word, self.dictionary, self.visitor_provider)
context.execute()
return context.result | Stem a singular word to its common stem form. | Stemmer/Stemmer.py | stem_singular_word | edho08/Sastrawize | 0 | python | def stem_singular_word(self, word):
context = Context(word, self.dictionary, self.visitor_provider)
context.execute()
return context.result | def stem_singular_word(self, word):
context = Context(word, self.dictionary, self.visitor_provider)
context.execute()
return context.result<|docstring|>Stem a singular word to its common stem form.<|endoftext|> |
10b3166fd2d210b858fa1c0ac383dfb1dde604f9fe2499a073817604d28791bb | def __setitem__(self, k, v):
'Annotates this file with a ``(k, v)`` pair, which will be\n included in its JSON serialized form.\n\n '
if (k in {'location', 'contentType', 'contentLength', 'metadata'}):
raise ValueError("Invalid key '{}'".format(k))
self._metadata[k] = v | Annotates this file with a ``(k, v)`` pair, which will be
included in its JSON serialized form. | src/servicelib/results.py | __setitem__ | ecmwf/servicelib | 2 | python | def __setitem__(self, k, v):
'Annotates this file with a ``(k, v)`` pair, which will be\n included in its JSON serialized form.\n\n '
if (k in {'location', 'contentType', 'contentLength', 'metadata'}):
raise ValueError("Invalid key '{}'".format(k))
self._metadata[k] = v | def __setitem__(self, k, v):
'Annotates this file with a ``(k, v)`` pair, which will be\n included in its JSON serialized form.\n\n '
if (k in {'location', 'contentType', 'contentLength', 'metadata'}):
raise ValueError("Invalid key '{}'".format(k))
self._metadata[k] = v<|docstring|>Annotates this file with a ``(k, v)`` pair, which will be
included in its JSON serialized form.<|endoftext|> |
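To see the reserved-key guard of `__setitem__` above in action, a self-contained stand-in with the same logic (the class name `FileResult` is invented for illustration):

class FileResult:
    def __init__(self):
        self._metadata = {}

    def __setitem__(self, k, v):
        if k in {'location', 'contentType', 'contentLength', 'metadata'}:
            raise ValueError("Invalid key '{}'".format(k))
        self._metadata[k] = v

r = FileResult()
r['license'] = 'CC-BY-4.0'           # allowed annotation
try:
    r['contentType'] = 'text/plain'  # reserved key
except ValueError as err:
    print(err)                       # Invalid key 'contentType'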
911ff57cd974538b1d89615cfd816b08b225df44537cd4b5001395530bef131d | def init_connection(self):
' create a connection and a cursor to access db '
config = app_config[self.config_type]
database_url = config.DATABASE_URL
self.admin_email = config.ADMIN_EMAIL
self.admin_password = config.ADMIN_PASSWORD
try:
global conn, cur
conn = psycopg2.connect(database_url)
cur = conn.cursor(cursor_factory=RealDictCursor)
return True
except Exception as error:
print('Error. Unable to establish Database connection')
print(error)
return False | create a connection and a cursor to access db | app/v2/db/database_config.py | init_connection | martinMutuma/def-politico | 0 | python | def init_connection(self):
' '
config = app_config[self.config_type]
database_url = config.DATABASE_URL
self.admin_email = config.ADMIN_EMAIL
self.admin_password = config.ADMIN_PASSWORD
try:
global conn, cur
conn = psycopg2.connect(database_url)
cur = conn.cursor(cursor_factory=RealDictCursor)
return True
except Exception as error:
print('Error. Unable to establish Database connection')
print(error)
return False | def init_connection(self):
' '
config = app_config[self.config_type]
database_url = config.DATABASE_URL
self.admin_email = config.ADMIN_EMAIL
self.admin_password = config.ADMIN_PASSWORD
try:
global conn, cur
conn = psycopg2.connect(database_url)
cur = conn.cursor(cursor_factory=RealDictCursor)
return True
except Exception as error:
print('Error. Unable to establish Database connection')
print(error)
return False<|docstring|>create a connection and a cursor to access db<|endoftext|> |
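A note on the `RealDictCursor` used in `init_connection` above: it makes every fetched row a dict keyed by column name. A minimal sketch (the DSN is a placeholder and a reachable PostgreSQL server is assumed):

import psycopg2
from psycopg2.extras import RealDictCursor

conn = psycopg2.connect('postgresql://user:secret@localhost:5432/politico')
cur = conn.cursor(cursor_factory=RealDictCursor)
cur.execute('SELECT 1 AS answer')
print(cur.fetchone())  # {'answer': 1} instead of (1,)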
4a08e5fcc58fc375d51ba2904083673337a5ee53bc12e11e3039da4262eea747 | def create_db(self):
' Creates all the tables for the database '
for query in table_queries:
cur.execute(query)
conn.commit() | Creates all the tables for the database | app/v2/db/database_config.py | create_db | martinMutuma/def-politico | 0 | python | def create_db(self):
' '
for query in table_queries:
cur.execute(query)
conn.commit() | def create_db(self):
' '
for query in table_queries:
cur.execute(query)
conn.commit()<|docstring|>Creates all the tables for the database<|endoftext|> |
4d53a471fc69c1b70f0e0521354d407332f523cbea661c599c991f7c9a66b1d2 | def drop_db(self):
' Drops all tables '
for table in table_names:
cur.execute('DROP TABLE IF EXISTS {} CASCADE'.format(table))
conn.commit() | Drops all tables | app/v2/db/database_config.py | drop_db | martinMutuma/def-politico | 0 | python | def drop_db(self):
' '
for table in table_names:
cur.execute('DROP TABLE IF EXISTS {} CASCADE'.format(table))
conn.commit() | def drop_db(self):
' '
for table in table_names:
cur.execute('DROP TABLE IF EXISTS {} CASCADE'.format(table))
conn.commit()<|docstring|>Drops all tables<|endoftext|> |
98094455662b4d10fef1812469cf93026b7fa92febef86c9b4a3aa074f4df39e | def create_super_user(self):
' creates a default user who is an admin '
query = "SELECT * FROM users WHERE email = '[email protected]'"
cur.execute(query)
user = cur.fetchone()
if (not user):
cur.execute("INSERT INTO users (firstname, lastname, phonenumber,\n email, password, passport_url, admin) VALUES ('Bedan', 'Kimani',\n '0712068754', '{}', '{}',\n 'https://cdn2.iconfinder.com/data/icons/avatar-2/512/kan_boy-512.png',\n True)\n ".format(self.admin_email, generate_password_hash(self.admin_password)))
conn.commit() | creates a default user who is an admin | app/v2/db/database_config.py | create_super_user | martinMutuma/def-politico | 0 | python | def create_super_user(self):
' '
query = "SELECT * FROM users WHERE email = '[email protected]'"
cur.execute(query)
user = cur.fetchone()
if (not user):
cur.execute("INSERT INTO users (firstname, lastname, phonenumber,\n email, password, passport_url, admin) VALUES ('Bedan', 'Kimani',\n '0712068754', '{}', '{}',\n 'https://cdn2.iconfinder.com/data/icons/avatar-2/512/kan_boy-512.png',\n True)\n ".format(self.admin_email, generate_password_hash(self.admin_password)))
conn.commit() | def create_super_user(self):
' '
query = "SELECT * FROM users WHERE email = '[email protected]'"
cur.execute(query)
user = cur.fetchone()
if (not user):
cur.execute("INSERT INTO users (firstname, lastname, phonenumber,\n email, password, passport_url, admin) VALUES ('Bedan', 'Kimani',\n '0712068754', '{}', '{}',\n 'https://cdn2.iconfinder.com/data/icons/avatar-2/512/kan_boy-512.png',\n True)\n ".format(self.admin_email, generate_password_hash(self.admin_password)))
conn.commit()<|docstring|>creates a default user who is an admin<|endoftext|> |
16708a39c9e1bb46f8338eba71e225d0cf7d738b4b4eb54f04a783619743f42b | def insert(self, query):
' Add a new item in the db '
cur.execute(query)
data = cur.fetchone()
conn.commit()
return data | Add a new item in the db | app/v2/db/database_config.py | insert | martinMutuma/def-politico | 0 | python | def insert(self, query):
' '
cur.execute(query)
data = cur.fetchone()
conn.commit()
return data | def insert(self, query):
' '
cur.execute(query)
data = cur.fetchone()
conn.commit()
return data<|docstring|>Add a new item in the db<|endoftext|>
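The `insert` helper above executes a pre-built SQL string. When values originate from user input, psycopg2's parameter binding is the safer pattern; a sketch reusing the module-level `cur` from these records (table and columns are illustrative):

cur.execute(
    'INSERT INTO users (email, password) VALUES (%s, %s) RETURNING *',
    ('[email protected]', 'already-hashed-password'),
)
row = cur.fetchone()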
633ab661bf0f2f39cb6885c777b8d84d8af80e13cad05b891678139e252ab40e | def get_one(self, query):
' Get one item from the db '
cur.execute(query)
data = cur.fetchone()
return data | Get one item from the db | app/v2/db/database_config.py | get_one | martinMutuma/def-politico | 0 | python | def get_one(self, query):
' '
cur.execute(query)
data = cur.fetchone()
return data | def get_one(self, query):
' '
cur.execute(query)
data = cur.fetchone()
return data<|docstring|>Get one item from the db<|endoftext|>
78d6917ada2a1500f525e989e8fe63e23c49f497c7b0de6a8630d03f23a49e6f | def get_all(self, query):
' Get all items from the db '
cur.execute(query)
data = cur.fetchall()
return data | Get all items from the db | app/v2/db/database_config.py | get_all | martinMutuma/def-politico | 0 | python | def get_all(self, query):
' '
cur.execute(query)
data = cur.fetchall()
return data | def get_all(self, query):
' '
cur.execute(query)
data = cur.fetchall()
return data<|docstring|>Get all items from the db<|endoftext|> |
8535eb060bed0d7d7bd6513c455666412721c1d259ddcb04e05177d8481734a6 | def execute(self, query):
' Execute any other query '
cur.execute(query)
conn.commit() | Execute any other query | app/v2/db/database_config.py | execute | martinMutuma/def-politico | 0 | python | def execute(self, query):
' '
cur.execute(query)
conn.commit() | def execute(self, query):
' '
cur.execute(query)
conn.commit()<|docstring|>Execute any other query<|endoftext|> |
e7ea5827f2a37df11b28cd48d729a0cee91a78a59fd38710ce7ad60b8c61e9e3 | def truncate(self):
' Clear all database tables '
cur.execute((('TRUNCATE TABLE ' + ','.join(table_names)) + ' CASCADE'))
conn.commit() | Clear all database tables | app/v2/db/database_config.py | truncate | martinMutuma/def-politico | 0 | python | def truncate(self):
' '
cur.execute((('TRUNCATE TABLE ' + ','.join(table_names)) + ' CASCADE'))
conn.commit() | def truncate(self):
' '
cur.execute((('TRUNCATE TABLE ' + ','.join(table_names)) + ' CASCADE'))
conn.commit()<|docstring|>Clear all database tables<|endoftext|>
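A hypothetical end-to-end use of the helper methods collected above; the class name `Database` and its constructor argument are assumptions, only the method names come from these records:

db = Database('testing')
if db.init_connection():
    db.create_db()
    db.create_super_user()
    users = db.get_all('SELECT * FROM users')
    db.truncate()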
e290db9b7e1b3f83fd2780add618ee108937bcfc6598040b661d5b96e5db6cf8 | def __init__(self, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709, runner_cls=None):
"\n Initializes the MTCNN.\n :param min_face_size: minimum size of the face to detect\n :param steps_threshold: step's thresholds values\n :param scale_factor: scale factor\n "
if (steps_threshold is None):
steps_threshold = [0.6, 0.7, 0.7]
self._min_face_size = min_face_size
self._steps_threshold = steps_threshold
self._scale_factor = scale_factor
pnet_path = os.path.join(os.path.dirname(__file__), 'pnet.onnx')
rnet_path = os.path.join(os.path.dirname(__file__), 'rnet.onnx')
onet_path = os.path.join(os.path.dirname(__file__), 'onet.onnx')
'\n self._pnet = cv2.dnn.readNetFromONNX(pnet_path)\n self._rnet = cv2.dnn.readNetFromONNX(rnet_path)\n self._onet = cv2.dnn.readNetFromONNX(onet_path)\n '
self._pnet = load_model(pnet_path, runner_cls)
self._rnet = load_model(rnet_path, runner_cls)
self._onet = load_model(onet_path, runner_cls) | Initializes the MTCNN.
:param min_face_size: minimum size of the face to detect
:param steps_threshold: per-step threshold values
:param scale_factor: scale factor | mtcnn_ort/mtcnn_ort.py | __init__ | yiyuezhuo/mtcnn-onnxruntime | 6 | python | def __init__(self, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709, runner_cls=None):
"\n Initializes the MTCNN.\n :param min_face_size: minimum size of the face to detect\n :param steps_threshold: step's thresholds values\n :param scale_factor: scale factor\n "
if (steps_threshold is None):
steps_threshold = [0.6, 0.7, 0.7]
self._min_face_size = min_face_size
self._steps_threshold = steps_threshold
self._scale_factor = scale_factor
pnet_path = os.path.join(os.path.dirname(__file__), 'pnet.onnx')
rnet_path = os.path.join(os.path.dirname(__file__), 'rnet.onnx')
onet_path = os.path.join(os.path.dirname(__file__), 'onet.onnx')
'\n self._pnet = cv2.dnn.readNetFromONNX(pnet_path)\n self._rnet = cv2.dnn.readNetFromONNX(rnet_path)\n self._onet = cv2.dnn.readNetFromONNX(onet_path)\n '
self._pnet = load_model(pnet_path, runner_cls)
self._rnet = load_model(rnet_path, runner_cls)
self._onet = load_model(onet_path, runner_cls) | def __init__(self, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709, runner_cls=None):
"\n Initializes the MTCNN.\n :param min_face_size: minimum size of the face to detect\n :param steps_threshold: step's thresholds values\n :param scale_factor: scale factor\n "
if (steps_threshold is None):
steps_threshold = [0.6, 0.7, 0.7]
self._min_face_size = min_face_size
self._steps_threshold = steps_threshold
self._scale_factor = scale_factor
pnet_path = os.path.join(os.path.dirname(__file__), 'pnet.onnx')
rnet_path = os.path.join(os.path.dirname(__file__), 'rnet.onnx')
onet_path = os.path.join(os.path.dirname(__file__), 'onet.onnx')
'\n self._pnet = cv2.dnn.readNetFromONNX(pnet_path)\n self._rnet = cv2.dnn.readNetFromONNX(rnet_path)\n self._onet = cv2.dnn.readNetFromONNX(onet_path)\n '
self._pnet = load_model(pnet_path, runner_cls)
self._rnet = load_model(rnet_path, runner_cls)
self._onet = load_model(onet_path, runner_cls)<|docstring|>Initializes the MTCNN.
:param min_face_size: minimum size of the face to detect
:param steps_threshold: per-step threshold values
:param scale_factor: scale factor<|endoftext|> |
bfe12643347b69f12997cbd17259dac68285cdd6f964fe382d729804d89e9f0c | @staticmethod
def __scale_image(image, scale: float):
'\n Scales the image to a given scale.\n :param image:\n :param scale:\n :return:\n '
(height, width, _) = image.shape
width_scaled = int(np.ceil((width * scale)))
height_scaled = int(np.ceil((height * scale)))
im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)
im_data_normalized = ((im_data - 127.5) * 0.0078125)
return im_data_normalized | Scales the image to a given scale.
:param image:
:param scale:
:return: | mtcnn_ort/mtcnn_ort.py | __scale_image | yiyuezhuo/mtcnn-onnxruntime | 6 | python | @staticmethod
def __scale_image(image, scale: float):
'\n Scales the image to a given scale.\n :param image:\n :param scale:\n :return:\n '
(height, width, _) = image.shape
width_scaled = int(np.ceil((width * scale)))
height_scaled = int(np.ceil((height * scale)))
im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)
im_data_normalized = ((im_data - 127.5) * 0.0078125)
return im_data_normalized | @staticmethod
def __scale_image(image, scale: float):
'\n Scales the image to a given scale.\n :param image:\n :param scale:\n :return:\n '
(height, width, _) = image.shape
width_scaled = int(np.ceil((width * scale)))
height_scaled = int(np.ceil((height * scale)))
im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)
im_data_normalized = ((im_data - 127.5) * 0.0078125)
return im_data_normalized<|docstring|>Scales the image to a given scale.
:param image:
:param scale:
:return:<|endoftext|> |
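The `(im_data - 127.5) * 0.0078125` step above maps 8-bit pixel values into roughly [-1, 1], since 0.0078125 is exactly 1/128; a quick NumPy check:

import numpy as np

pixels = np.array([0.0, 127.5, 255.0])
print((pixels - 127.5) * 0.0078125)  # [-0.99609375  0.  0.99609375]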
cb2068d039b5ae99acc2d73b0400f523156f41dfa3bceb8bb049c6fa72d381c0 | @staticmethod
def __nms(boxes, threshold, method):
"\n Non Maximum Suppression.\n :param boxes: np array with bounding boxes.\n :param threshold:\n :param method: NMS method to apply. Available values ('Min', 'Union')\n :return:\n "
if (boxes.size == 0):
return np.empty((0, 3))
x1 = boxes[(:, 0)]
y1 = boxes[(:, 1)]
x2 = boxes[(:, 2)]
y2 = boxes[(:, 3)]
s = boxes[(:, 4)]
area = (((x2 - x1) + 1) * ((y2 - y1) + 1))
sorted_s = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while (sorted_s.size > 0):
i = sorted_s[(- 1)]
pick[counter] = i
counter += 1
idx = sorted_s[0:(- 1)]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, ((xx2 - xx1) + 1))
h = np.maximum(0.0, ((yy2 - yy1) + 1))
inter = (w * h)
if (method == 'Min'):
o = (inter / np.minimum(area[i], area[idx]))
else:
o = (inter / ((area[i] + area[idx]) - inter))
sorted_s = sorted_s[np.where((o <= threshold))]
pick = pick[0:counter]
return pick | Non Maximum Suppression.
:param boxes: np array with bounding boxes.
:param threshold:
:param method: NMS method to apply. Available values ('Min', 'Union')
:return: | mtcnn_ort/mtcnn_ort.py | __nms | yiyuezhuo/mtcnn-onnxruntime | 6 | python | @staticmethod
def __nms(boxes, threshold, method):
"\n Non Maximum Suppression.\n :param boxes: np array with bounding boxes.\n :param threshold:\n :param method: NMS method to apply. Available values ('Min', 'Union')\n :return:\n "
if (boxes.size == 0):
return np.empty((0, 3))
x1 = boxes[(:, 0)]
y1 = boxes[(:, 1)]
x2 = boxes[(:, 2)]
y2 = boxes[(:, 3)]
s = boxes[(:, 4)]
area = (((x2 - x1) + 1) * ((y2 - y1) + 1))
sorted_s = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while (sorted_s.size > 0):
i = sorted_s[(- 1)]
pick[counter] = i
counter += 1
idx = sorted_s[0:(- 1)]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, ((xx2 - xx1) + 1))
h = np.maximum(0.0, ((yy2 - yy1) + 1))
inter = (w * h)
if (method == 'Min'):
o = (inter / np.minimum(area[i], area[idx]))
else:
o = (inter / ((area[i] + area[idx]) - inter))
sorted_s = sorted_s[np.where((o <= threshold))]
pick = pick[0:counter]
return pick | @staticmethod
def __nms(boxes, threshold, method):
"\n Non Maximum Suppression.\n :param boxes: np array with bounding boxes.\n :param threshold:\n :param method: NMS method to apply. Available values ('Min', 'Union')\n :return:\n "
if (boxes.size == 0):
return np.empty((0, 3))
x1 = boxes[(:, 0)]
y1 = boxes[(:, 1)]
x2 = boxes[(:, 2)]
y2 = boxes[(:, 3)]
s = boxes[(:, 4)]
area = (((x2 - x1) + 1) * ((y2 - y1) + 1))
sorted_s = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while (sorted_s.size > 0):
i = sorted_s[(- 1)]
pick[counter] = i
counter += 1
idx = sorted_s[0:(- 1)]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, ((xx2 - xx1) + 1))
h = np.maximum(0.0, ((yy2 - yy1) + 1))
inter = (w * h)
if (method == 'Min'):
o = (inter / np.minimum(area[i], area[idx]))
else:
o = (inter / ((area[i] + area[idx]) - inter))
sorted_s = sorted_s[np.where((o <= threshold))]
pick = pick[0:counter]
return pick<|docstring|>Non Maximum Suppression.
:param boxes: np array with bounding boxes.
:param threshold:
:param method: NMS method to apply. Available values ('Min', 'Union')
:return:<|endoftext|> |
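A tiny worked example of the overlap test inside the NMS loop above (pure NumPy; the two boxes are made up). The +1 terms treat coordinates as inclusive pixel indices:

import numpy as np

a = np.array([0.0, 0.0, 10.0, 10.0])  # x1, y1, x2, y2
b = np.array([5.0, 5.0, 15.0, 15.0])
xx1, yy1 = np.maximum(a[0], b[0]), np.maximum(a[1], b[1])
xx2, yy2 = np.minimum(a[2], b[2]), np.minimum(a[3], b[3])
inter = max(0.0, xx2 - xx1 + 1) * max(0.0, yy2 - yy1 + 1)  # 36.0
area = lambda r: (r[2] - r[0] + 1) * (r[3] - r[1] + 1)     # 121.0 each
print(inter / (area(a) + area(b) - inter))  # 'Union' overlap, ~0.175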
b68c110c699373c96e7cb46b2eff4b2d09d7dbd7a9df383accaee726c137a904 | def detect_faces(self, img) -> list:
'\n Detects bounding boxes from the specified image.\n :param img: image to process\n :return: list containing all the bounding boxes detected with their keypoints. box: (x, y, w, h), point: (x, y)\n '
(total_boxes, points) = self.detect_faces_raw(img)
bounding_boxes = []
for (bounding_box, keypoints) in zip(total_boxes, points.T):
x = max(0, int(bounding_box[0]))
y = max(0, int(bounding_box[1]))
width = int((bounding_box[2] - x))
height = int((bounding_box[3] - y))
bounding_boxes.append({'box': [x, y, width, height], 'confidence': bounding_box[(- 1)], 'keypoints': {'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9]))}})
return bounding_boxes | Detects bounding boxes from the specified image.
:param img: image to process
:return: list containing all the bounding boxes detected with their keypoints. box: (x, y, w, h), point: (x, y) | mtcnn_ort/mtcnn_ort.py | detect_faces | yiyuezhuo/mtcnn-onnxruntime | 6 | python | def detect_faces(self, img) -> list:
'\n Detects bounding boxes from the specified image.\n :param img: image to process\n :return: list containing all the bounding boxes detected with their keypoints. box: (x, y, w, h), point: (x, y)\n '
(total_boxes, points) = self.detect_faces_raw(img)
bounding_boxes = []
for (bounding_box, keypoints) in zip(total_boxes, points.T):
x = max(0, int(bounding_box[0]))
y = max(0, int(bounding_box[1]))
width = int((bounding_box[2] - x))
height = int((bounding_box[3] - y))
bounding_boxes.append({'box': [x, y, width, height], 'confidence': bounding_box[(- 1)], 'keypoints': {'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9]))}})
return bounding_boxes | def detect_faces(self, img) -> list:
'\n Detects bounding boxes from the specified image.\n :param img: image to process\n :return: list containing all the bounding boxes detected with their keypoints. box: (x, y, w, h), point: (x, y)\n '
(total_boxes, points) = self.detect_faces_raw(img)
bounding_boxes = []
for (bounding_box, keypoints) in zip(total_boxes, points.T):
x = max(0, int(bounding_box[0]))
y = max(0, int(bounding_box[1]))
width = int((bounding_box[2] - x))
height = int((bounding_box[3] - y))
bounding_boxes.append({'box': [x, y, width, height], 'confidence': bounding_box[(- 1)], 'keypoints': {'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9]))}})
return bounding_boxes<|docstring|>Detects bounding boxes from the specified image.
:param img: image to process
:return: list containing all the bounding boxes detected with their keypoints. box: (x, y, w, h), point: (x, y)<|endoftext|> |
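A hypothetical usage sketch for `detect_faces` above; it assumes the class is exported as `MTCNN` from the `mtcnn_ort` package and that 'face.jpg' exists, and converts to RGB because OpenCV loads BGR:

import cv2
from mtcnn_ort import MTCNN

detector = MTCNN()
image = cv2.cvtColor(cv2.imread('face.jpg'), cv2.COLOR_BGR2RGB)
for face in detector.detect_faces(image):
    x, y, w, h = face['box']
    print(face['confidence'], (x, y, w, h), face['keypoints']['left_eye'])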
1a19b9e14ada165034bf266aa132a1327ed36b7dcc48f969ecf9e7d8dec1c558 | def mark_faces(self, image_data) -> bytes:
'\n Mark all the faces\n '
ext = imghdr.what(None, image_data)
im = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
results = self.detect_faces(image)
for result in results:
bounding_box = result['box']
keypoints = result['keypoints']
cv2.rectangle(image, (bounding_box[0], bounding_box[1]), ((bounding_box[0] + bounding_box[2]), (bounding_box[1] + bounding_box[3])), (0, 155, 255), 2)
cv2.circle(image, keypoints['left_eye'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['right_eye'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['nose'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['mouth_left'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['mouth_right'], 2, (0, 155, 255), 2)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
(is_success, im_buf_arr) = cv2.imencode(('.' + ext), image)
return im_buf_arr | Mark all the faces | mtcnn_ort/mtcnn_ort.py | mark_faces | yiyuezhuo/mtcnn-onnxruntime | 6 | python | def mark_faces(self, image_data) -> bytes:
'\n \n '
ext = imghdr.what(None, image_data)
im = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
results = self.detect_faces(image)
for result in results:
bounding_box = result['box']
keypoints = result['keypoints']
cv2.rectangle(image, (bounding_box[0], bounding_box[1]), ((bounding_box[0] + bounding_box[2]), (bounding_box[1] + bounding_box[3])), (0, 155, 255), 2)
cv2.circle(image, keypoints['left_eye'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['right_eye'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['nose'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['mouth_left'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['mouth_right'], 2, (0, 155, 255), 2)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
(is_success, im_buf_arr) = cv2.imencode(('.' + ext), image)
return im_buf_arr | def mark_faces(self, image_data) -> bytes:
'\n \n '
ext = imghdr.what(None, image_data)
im = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
results = self.detect_faces(image)
for result in results:
bounding_box = result['box']
keypoints = result['keypoints']
cv2.rectangle(image, (bounding_box[0], bounding_box[1]), ((bounding_box[0] + bounding_box[2]), (bounding_box[1] + bounding_box[3])), (0, 155, 255), 2)
cv2.circle(image, keypoints['left_eye'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['right_eye'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['nose'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['mouth_left'], 2, (0, 155, 255), 2)
cv2.circle(image, keypoints['mouth_right'], 2, (0, 155, 255), 2)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
(is_success, im_buf_arr) = cv2.imencode(('.' + ext), image)
return im_buf_arr<|docstring|>Mark all the faces<|endoftext|> |
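A round-trip sketch for `mark_faces` above: encoded image bytes in, annotated image out. Despite the `-> bytes` annotation it returns the NumPy buffer produced by `cv2.imencode`, hence `.tobytes()` before writing (class name `MTCNN` assumed, 'group.jpg' illustrative):

from mtcnn_ort import MTCNN

with open('group.jpg', 'rb') as f:
    raw = f.read()
annotated = MTCNN().mark_faces(raw)
with open('group_marked.jpg', 'wb') as f:
    f.write(annotated.tobytes())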
f28931ec3909cd9940573169afe52097ee8d6c416296c52b3e0f5bc0ff5dee72 | def __stage1(self, image, scales: list, stage_status: StageStatus):
'\n First stage of the MTCNN.\n :param image:\n :param scales:\n :param stage_status:\n :return:\n '
total_boxes = np.empty((0, 9))
status = stage_status
for scale in scales:
scaled_image = self.__scale_image(image, scale)
img_x = np.expand_dims(scaled_image, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
"\n self._pnet.setInput(img_y)\n out = self._pnet.forward(['conv2d_4', 'softmax'])\n "
out = self._pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
(boxes, _) = self.__generate_bounding_box(out1[(0, :, :, 1)].copy(), out0[(0, :, :, :)].copy(), scale, self._steps_threshold[0])
pick = self.__nms(boxes.copy(), 0.5, 'Union')
if ((boxes.size > 0) and (pick.size > 0)):
boxes = boxes[(pick, :)]
total_boxes = np.append(total_boxes, boxes, axis=0)
numboxes = total_boxes.shape[0]
if (numboxes > 0):
pick = self.__nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[(pick, :)]
regw = (total_boxes[(:, 2)] - total_boxes[(:, 0)])
regh = (total_boxes[(:, 3)] - total_boxes[(:, 1)])
qq1 = (total_boxes[(:, 0)] + (total_boxes[(:, 5)] * regw))
qq2 = (total_boxes[(:, 1)] + (total_boxes[(:, 6)] * regh))
qq3 = (total_boxes[(:, 2)] + (total_boxes[(:, 7)] * regw))
qq4 = (total_boxes[(:, 3)] + (total_boxes[(:, 8)] * regh))
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[(:, 4)]]))
total_boxes = self.__rerec(total_boxes.copy())
total_boxes[(:, 0:4)] = np.fix(total_boxes[(:, 0:4)]).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height)
return (total_boxes, status) | First stage of the MTCNN.
:param image:
:param scales:
:param stage_status:
:return: | mtcnn_ort/mtcnn_ort.py | __stage1 | yiyuezhuo/mtcnn-onnxruntime | 6 | python | def __stage1(self, image, scales: list, stage_status: StageStatus):
'\n First stage of the MTCNN.\n :param image:\n :param scales:\n :param stage_status:\n :return:\n '
total_boxes = np.empty((0, 9))
status = stage_status
for scale in scales:
scaled_image = self.__scale_image(image, scale)
img_x = np.expand_dims(scaled_image, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
"\n self._pnet.setInput(img_y)\n out = self._pnet.forward(['conv2d_4', 'softmax'])\n "
out = self._pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
(boxes, _) = self.__generate_bounding_box(out1[(0, :, :, 1)].copy(), out0[(0, :, :, :)].copy(), scale, self._steps_threshold[0])
pick = self.__nms(boxes.copy(), 0.5, 'Union')
if ((boxes.size > 0) and (pick.size > 0)):
boxes = boxes[(pick, :)]
total_boxes = np.append(total_boxes, boxes, axis=0)
numboxes = total_boxes.shape[0]
if (numboxes > 0):
pick = self.__nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[(pick, :)]
regw = (total_boxes[(:, 2)] - total_boxes[(:, 0)])
regh = (total_boxes[(:, 3)] - total_boxes[(:, 1)])
qq1 = (total_boxes[(:, 0)] + (total_boxes[(:, 5)] * regw))
qq2 = (total_boxes[(:, 1)] + (total_boxes[(:, 6)] * regh))
qq3 = (total_boxes[(:, 2)] + (total_boxes[(:, 7)] * regw))
qq4 = (total_boxes[(:, 3)] + (total_boxes[(:, 8)] * regh))
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[(:, 4)]]))
total_boxes = self.__rerec(total_boxes.copy())
total_boxes[(:, 0:4)] = np.fix(total_boxes[(:, 0:4)]).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height)
return (total_boxes, status) | def __stage1(self, image, scales: list, stage_status: StageStatus):
'\n First stage of the MTCNN.\n :param image:\n :param scales:\n :param stage_status:\n :return:\n '
total_boxes = np.empty((0, 9))
status = stage_status
for scale in scales:
scaled_image = self.__scale_image(image, scale)
img_x = np.expand_dims(scaled_image, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
"\n self._pnet.setInput(img_y)\n out = self._pnet.forward(['conv2d_4', 'softmax'])\n "
out = self._pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
(boxes, _) = self.__generate_bounding_box(out1[(0, :, :, 1)].copy(), out0[(0, :, :, :)].copy(), scale, self._steps_threshold[0])
pick = self.__nms(boxes.copy(), 0.5, 'Union')
if ((boxes.size > 0) and (pick.size > 0)):
boxes = boxes[(pick, :)]
total_boxes = np.append(total_boxes, boxes, axis=0)
numboxes = total_boxes.shape[0]
if (numboxes > 0):
pick = self.__nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[(pick, :)]
regw = (total_boxes[(:, 2)] - total_boxes[(:, 0)])
regh = (total_boxes[(:, 3)] - total_boxes[(:, 1)])
qq1 = (total_boxes[(:, 0)] + (total_boxes[(:, 5)] * regw))
qq2 = (total_boxes[(:, 1)] + (total_boxes[(:, 6)] * regh))
qq3 = (total_boxes[(:, 2)] + (total_boxes[(:, 7)] * regw))
qq4 = (total_boxes[(:, 3)] + (total_boxes[(:, 8)] * regh))
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[(:, 4)]]))
total_boxes = self.__rerec(total_boxes.copy())
total_boxes[(:, 0:4)] = np.fix(total_boxes[(:, 0:4)]).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height)
return (total_boxes, status)<|docstring|>First stage of the MTCNN.
:param image:
:param scales:
:param stage_status:
:return:<|endoftext|> |
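For context, stage 1 runs over an image pyramid built outside this record; that pyramid is typically derived from the constructor's `min_face_size` and `scale_factor` roughly as below (a sketch with an illustrative image size, not the repository's exact code):

min_face_size, scale_factor = 20, 0.709
height, width = 460, 640
m = 12.0 / min_face_size            # P-Net works on 12x12 windows
min_layer = min(height, width) * m
scales = []
while min_layer >= 12:
    scales.append(m * scale_factor ** len(scales))
    min_layer *= scale_factor
print(len(scales), [round(s, 3) for s in scales[:3]])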
d7192790f656e537139ebd4499a96ffcac6afb0856dc6db92254df8b75646d96 | def __stage2(self, img, total_boxes, stage_status: StageStatus):
'\n Second stage of the MTCNN.\n :param img:\n :param total_boxes:\n :param stage_status:\n :return:\n '
num_boxes = total_boxes.shape[0]
if (num_boxes == 0):
return (total_boxes, stage_status)
tempimg = np.zeros(shape=(24, 24, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))
tmp[((stage_status.dy[k] - 1):stage_status.edy[k], (stage_status.dx[k] - 1):stage_status.edx[k], :)] = img[((stage_status.y[k] - 1):stage_status.ey[k], (stage_status.x[k] - 1):stage_status.ex[k], :)]
if (((tmp.shape[0] > 0) and (tmp.shape[1] > 0)) or ((tmp.shape[0] == 0) and (tmp.shape[1] == 0))):
tempimg[(:, :, :, k)] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)
else:
return (np.empty(shape=(0,)), stage_status)
tempimg = ((tempimg - 127.5) * 0.0078125)
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
"\n self._rnet.setInput(tempimg1)\n out = self._rnet.forward(['dense_2', 'softmax_1'])\n "
out = self._rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[(1, :)]
ipass = np.where((score > self._steps_threshold[1]))
total_boxes = np.hstack([total_boxes[(ipass[0], 0:4)].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[(:, ipass[0])]
if (total_boxes.shape[0] > 0):
pick = self.__nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[(pick, :)]
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[(:, pick)]))
total_boxes = self.__rerec(total_boxes.copy())
return (total_boxes, stage_status) | Second stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return: | mtcnn_ort/mtcnn_ort.py | __stage2 | yiyuezhuo/mtcnn-onnxruntime | 6 | python | def __stage2(self, img, total_boxes, stage_status: StageStatus):
'\n Second stage of the MTCNN.\n :param img:\n :param total_boxes:\n :param stage_status:\n :return:\n '
num_boxes = total_boxes.shape[0]
if (num_boxes == 0):
return (total_boxes, stage_status)
tempimg = np.zeros(shape=(24, 24, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))
tmp[((stage_status.dy[k] - 1):stage_status.edy[k], (stage_status.dx[k] - 1):stage_status.edx[k], :)] = img[((stage_status.y[k] - 1):stage_status.ey[k], (stage_status.x[k] - 1):stage_status.ex[k], :)]
if (((tmp.shape[0] > 0) and (tmp.shape[1] > 0)) or ((tmp.shape[0] == 0) and (tmp.shape[1] == 0))):
tempimg[(:, :, :, k)] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)
else:
return (np.empty(shape=(0,)), stage_status)
tempimg = ((tempimg - 127.5) * 0.0078125)
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
"\n self._rnet.setInput(tempimg1)\n out = self._rnet.forward(['dense_2', 'softmax_1'])\n "
out = self._rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[(1, :)]
ipass = np.where((score > self._steps_threshold[1]))
total_boxes = np.hstack([total_boxes[(ipass[0], 0:4)].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[(:, ipass[0])]
if (total_boxes.shape[0] > 0):
pick = self.__nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[(pick, :)]
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[(:, pick)]))
total_boxes = self.__rerec(total_boxes.copy())
return (total_boxes, stage_status) | def __stage2(self, img, total_boxes, stage_status: StageStatus):
'\n Second stage of the MTCNN.\n :param img:\n :param total_boxes:\n :param stage_status:\n :return:\n '
num_boxes = total_boxes.shape[0]
if (num_boxes == 0):
return (total_boxes, stage_status)
tempimg = np.zeros(shape=(24, 24, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))
tmp[((stage_status.dy[k] - 1):stage_status.edy[k], (stage_status.dx[k] - 1):stage_status.edx[k], :)] = img[((stage_status.y[k] - 1):stage_status.ey[k], (stage_status.x[k] - 1):stage_status.ex[k], :)]
if (((tmp.shape[0] > 0) and (tmp.shape[1] > 0)) or ((tmp.shape[0] == 0) and (tmp.shape[1] == 0))):
tempimg[(:, :, :, k)] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)
else:
return (np.empty(shape=(0,)), stage_status)
tempimg = ((tempimg - 127.5) * 0.0078125)
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
"\n self._rnet.setInput(tempimg1)\n out = self._rnet.forward(['dense_2', 'softmax_1'])\n "
out = self._rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[(1, :)]
ipass = np.where((score > self._steps_threshold[1]))
total_boxes = np.hstack([total_boxes[(ipass[0], 0:4)].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[(:, ipass[0])]
if (total_boxes.shape[0] > 0):
pick = self.__nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[(pick, :)]
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[(:, pick)]))
total_boxes = self.__rerec(total_boxes.copy())
return (total_boxes, stage_status)<|docstring|>Second stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:<|endoftext|> |
0677f3583d973b0724c6713fa3bccd8198056704e90aa347ef35df545418628a | def __stage3(self, img, total_boxes, stage_status: StageStatus):
'\n Third stage of the MTCNN.\n :param img:\n :param total_boxes:\n :param stage_status:\n :return:\n '
num_boxes = total_boxes.shape[0]
if (num_boxes == 0):
return (total_boxes, np.empty(shape=(0,)))
total_boxes = np.fix(total_boxes).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height)
tempimg = np.zeros((48, 48, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))
tmp[((status.dy[k] - 1):status.edy[k], (status.dx[k] - 1):status.edx[k], :)] = img[((status.y[k] - 1):status.ey[k], (status.x[k] - 1):status.ex[k], :)]
if (((tmp.shape[0] > 0) and (tmp.shape[1] > 0)) or ((tmp.shape[0] == 0) and (tmp.shape[1] == 0))):
tempimg[(:, :, :, k)] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)
else:
return (np.empty(shape=(0,)), np.empty(shape=(0,)))
tempimg = ((tempimg - 127.5) * 0.0078125)
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
"\n self._onet.setInput(tempimg1)\n out = self._onet.forward(['dense_5', 'dense_6', 'softmax_2'])\n "
out = self._onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[(1, :)]
points = out1
ipass = np.where((score > self._steps_threshold[2]))
points = points[(:, ipass[0])]
total_boxes = np.hstack([total_boxes[(ipass[0], 0:4)].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[(:, ipass[0])]
w = ((total_boxes[(:, 2)] - total_boxes[(:, 0)]) + 1)
h = ((total_boxes[(:, 3)] - total_boxes[(:, 1)]) + 1)
points[(0:5, :)] = (((np.tile(w, (5, 1)) * points[(0:5, :)]) + np.tile(total_boxes[(:, 0)], (5, 1))) - 1)
points[(5:10, :)] = (((np.tile(h, (5, 1)) * points[(5:10, :)]) + np.tile(total_boxes[(:, 1)], (5, 1))) - 1)
if (total_boxes.shape[0] > 0):
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))
pick = self.__nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[(pick, :)]
points = points[(:, pick)]
return (total_boxes, points) | Third stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return: | mtcnn_ort/mtcnn_ort.py | __stage3 | yiyuezhuo/mtcnn-onnxruntime | 6 | python | def __stage3(self, img, total_boxes, stage_status: StageStatus):
'\n Third stage of the MTCNN.\n :param img:\n :param total_boxes:\n :param stage_status:\n :return:\n '
num_boxes = total_boxes.shape[0]
if (num_boxes == 0):
return (total_boxes, np.empty(shape=(0,)))
total_boxes = np.fix(total_boxes).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height)
tempimg = np.zeros((48, 48, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))
tmp[((status.dy[k] - 1):status.edy[k], (status.dx[k] - 1):status.edx[k], :)] = img[((status.y[k] - 1):status.ey[k], (status.x[k] - 1):status.ex[k], :)]
if (((tmp.shape[0] > 0) and (tmp.shape[1] > 0)) or ((tmp.shape[0] == 0) and (tmp.shape[1] == 0))):
tempimg[(:, :, :, k)] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)
else:
return (np.empty(shape=(0,)), np.empty(shape=(0,)))
tempimg = ((tempimg - 127.5) * 0.0078125)
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
"\n self._onet.setInput(tempimg1)\n out = self._onet.forward(['dense_5', 'dense_6', 'softmax_2'])\n "
out = self._onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[(1, :)]
points = out1
ipass = np.where((score > self._steps_threshold[2]))
points = points[(:, ipass[0])]
total_boxes = np.hstack([total_boxes[(ipass[0], 0:4)].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[(:, ipass[0])]
w = ((total_boxes[(:, 2)] - total_boxes[(:, 0)]) + 1)
h = ((total_boxes[(:, 3)] - total_boxes[(:, 1)]) + 1)
points[(0:5, :)] = (((np.tile(w, (5, 1)) * points[(0:5, :)]) + np.tile(total_boxes[(:, 0)], (5, 1))) - 1)
points[(5:10, :)] = (((np.tile(h, (5, 1)) * points[(5:10, :)]) + np.tile(total_boxes[(:, 1)], (5, 1))) - 1)
if (total_boxes.shape[0] > 0):
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))
pick = self.__nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[(pick, :)]
points = points[(:, pick)]
return (total_boxes, points) | def __stage3(self, img, total_boxes, stage_status: StageStatus):
'\n Third stage of the MTCNN.\n :param img:\n :param total_boxes:\n :param stage_status:\n :return:\n '
num_boxes = total_boxes.shape[0]
if (num_boxes == 0):
return (total_boxes, np.empty(shape=(0,)))
total_boxes = np.fix(total_boxes).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height)
tempimg = np.zeros((48, 48, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))
tmp[(status.dy[k] - 1):status.edy[k], (status.dx[k] - 1):status.edx[k], :] = img[(status.y[k] - 1):status.ey[k], (status.x[k] - 1):status.ex[k], :]
if (((tmp.shape[0] > 0) and (tmp.shape[1] > 0)) or ((tmp.shape[0] == 0) and (tmp.shape[1] == 0))):
tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)
else:
return (np.empty(shape=(0,)), np.empty(shape=(0,)))
tempimg = ((tempimg - 127.5) * 0.0078125)
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
"\n self._onet.setInput(tempimg1)\n out = self._onet.forward(['dense_5', 'dense_6', 'softmax_2'])\n "
out = self._onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where((score > self._steps_threshold[2]))
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = ((total_boxes[:, 2] - total_boxes[:, 0]) + 1)
h = ((total_boxes[:, 3] - total_boxes[:, 1]) + 1)
points[0:5, :] = (((np.tile(w, (5, 1)) * points[0:5, :]) + np.tile(total_boxes[:, 0], (5, 1))) - 1)
points[5:10, :] = (((np.tile(h, (5, 1)) * points[5:10, :]) + np.tile(total_boxes[:, 1], (5, 1))) - 1)
if (total_boxes.shape[0] > 0):
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))
pick = self.__nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return (total_boxes, points)<|docstring|>Third stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:<|endoftext|> |
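A minimal NumPy sketch of the landmark rescaling step in the record above: O-Net emits five landmark coordinates normalized to each box, and the code maps them into image coordinates via x = w * px + x1 - 1 and y = h * py + y1 - 1. All values below are hypothetical; `points` follows the (10, n_boxes) layout used above.

import numpy as np

# One hypothetical box (x1, y1, x2, y2, score) and landmarks at the box center.
total_boxes = np.array([[10.0, 20.0, 59.0, 89.0, 0.99]])
points = np.full((10, 1), 0.5)  # rows 0-4 are x, rows 5-9 are y

w = total_boxes[:, 2] - total_boxes[:, 0] + 1  # box widths
h = total_boxes[:, 3] - total_boxes[:, 1] + 1  # box heights
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
print(points.ravel())  # five x values of 34.0, then five y values of 54.0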
df199b114696f6aaee8e78d8d873ef16b23cf58bb4aa4f7e375341f411732fc7 | def __init__(self, runtime_group: RuntimeGroup):
'Initialization method.\n\n Args:\n runtime_group: The group where the workers will be started.\n '
super().__init__(runtime_group)
self.dbpath: Optional[str] = None | Initialization method.
Args:
runtime_group: The group where the workers will be started. | src/lazycluster/cluster/hyperopt_cluster.py | __init__ | prototypefund/lazycluster | 44 | python | def __init__(self, runtime_group: RuntimeGroup):
'Initialization method.\n\n Args:\n runtime_group: The group where the workers will be started.\n '
super().__init__(runtime_group)
self.dbpath: Optional[str] = None | def __init__(self, runtime_group: RuntimeGroup):
'Initialization method.\n\n Args:\n runtime_group: The group where the workers will be started.\n '
super().__init__(runtime_group)
self.dbpath: Optional[str] = None<|docstring|>Initialization method.
Args:
runtime_group: The group where the workers will be started.<|endoftext|> |
bf449272052829cfb2be3b0ae5c381585718efb5d06aeef3fa8879b0703c75c3 | def start(self, ports: Union[(List[int], int)], timeout: int=0, debug: bool=False) -> List[int]:
'Launch a master instance.\n\n Note:\n If you create a custom subclass of MasterLauncher which will not start the master instance on localhost\n then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you\n can benefit from the debug feature of `RuntimeTask.execute()`.\n\n Args:\n ports: Port where the DB should be started. If a list is given then the first port that is free in the\n `RuntimeGroup` will be used. The actual chosen port can be requested via the property `port`.\n timeout: Timeout (s) after which a MasterStartError is raised if the DB instance has not started yet. Defaults to\n 3 seconds.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then\n the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to\n `False`.\n\n Returns:\n List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.\n\n Raises:\n PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.\n NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.\n MasterStartError: If master was not started after the specified `timeout`.\n '
if debug:
self.log.debug('The debug flag has no effect in LocalMongoLauncher.')
_ports: Union[(List[int], int)] = (ports.copy() if isinstance(ports, list) else ports)
if (not isinstance(_ports, list)):
if (_utils.localhost_has_free_port(_ports) and self._group.has_free_port(_ports, exclude_hosts=Runtime.LOCALHOST)):
self._port = master_port = _ports
else:
raise PortInUseError(_ports, self._group)
else:
self._port = master_port = self._group.get_free_port(_ports)
_ports = _utils.get_remaining_ports(_ports, master_port)
self.log.debug(f'Starting MongoDB on localhost on port {str(master_port)} with dbpath `{self.dbpath}` and logfile `{self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME}`.')
return_code = os.system(self.get_mongod_start_cmd())
if (return_code != 0):
cause = f'Please verify that (1) MongoDB is installed, (2) the dbpath `{self.dbpath}` exists with the rights required by mongod and (3) that no other MongoDB instance is using and consequently locking the respective files (=> Init HyperoptCluster with another dbpath or manually stop the mongod process). See hyperopt docs in README for further details.'
raise MasterStartError('localhost', master_port, cause)
time.sleep(timeout)
if (not _utils.localhost_has_free_port(master_port)):
self.log.info(('MongoDB started on localhost on port ' + str(self._port)))
else:
self.log.debug(('MongoDB could NOT be started successfully on port ' + str(self._port)))
cause = f'The master port {master_port} is still free when checking after the timeout of {timeout} seconds.'
raise MasterStartError('localhost', master_port, cause)
self.log.info('Expose the MongoDB port in the RuntimeGroup.')
self._group.expose_port_to_runtimes(self._port)
return (_ports if isinstance(_ports, list) else []) | Launch a master instance.
Note:
If you create a custom subclass of MasterLauncher which will not start the master instance on localhost
then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you
can benefit from the debug feature of `RuntimeTask.execute()`.
Args:
ports: Port where the DB should be started. If a list is given then the first port that is free in the
`RuntimeGroup` will be used. The actual chosen port can be requested via the property `port`.
timeout: Timeout (s) after which a MasterStartError is raised if the DB instance has not started yet. Defaults to
3 seconds.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Returns:
List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.
MasterStartError: If master was not started after the specified `timeout`. | src/lazycluster/cluster/hyperopt_cluster.py | start | prototypefund/lazycluster | 44 | python | def start(self, ports: Union[(List[int], int)], timeout: int=0, debug: bool=False) -> List[int]:
'Launch a master instance.\n\n Note:\n If you create a custom subclass of MasterLauncher which will not start the master instance on localhost\n then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you\n can benefit from the debug feature of `RuntimeTask.execute()`.\n\n Args:\n ports: Port where the DB should be started. If a list is given then the first port that is free in the\n `RuntimeGroup` will be used. The actual chosen port can be requested via the property `port`.\n timeout: Timeout (s) after which a MasterStartError is raised if the DB instance has not started yet. Defaults to\n 3 seconds.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then\n the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to\n `False`.\n\n Returns:\n List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.\n\n Raises:\n PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.\n NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.\n MasterStartError: If master was not started after the specified `timeout`.\n '
if debug:
self.log.debug('The debug flag has no effect in LocalMongoLauncher.')
_ports: Union[(List[int], int)] = (ports.copy() if isinstance(ports, list) else ports)
if (not isinstance(_ports, list)):
if (_utils.localhost_has_free_port(_ports) and self._group.has_free_port(_ports, exclude_hosts=Runtime.LOCALHOST)):
self._port = master_port = _ports
else:
raise PortInUseError(_ports, self._group)
else:
self._port = master_port = self._group.get_free_port(_ports)
_ports = _utils.get_remaining_ports(_ports, master_port)
self.log.debug(f'Starting MongoDB on localhost on port {str(master_port)} with dbpath `{self.dbpath}` and logfile `{self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME}`.')
return_code = os.system(self.get_mongod_start_cmd())
if (return_code != 0):
cause = f'Please verify that (1) MongoDB is installed, (2) the dbpath `{self.dbpath}` exists with the rights required by mongod and (3) that no other MongoDB instance is using and consequently locking the respective files (=> Init HyperoptCluster with another dbpath or manually stop the mongod process). See hyperopt docs in README for further details.'
raise MasterStartError('localhost', master_port, cause)
time.sleep(timeout)
if (not _utils.localhost_has_free_port(master_port)):
self.log.info(('MongoDB started on localhost on port ' + str(self._port)))
else:
self.log.debug(('MongoDB could NOT be started successfully on port ' + str(self._port)))
cause = f'The master port {master_port} is still free when checking after the timeout of {timeout} seconds.'
raise MasterStartError('localhost', master_port, cause)
self.log.info('Expose the MongoDB port in the RuntimeGroup.')
self._group.expose_port_to_runtimes(self._port)
return (_ports if isinstance(_ports, list) else []) | def start(self, ports: Union[(List[int], int)], timeout: int=0, debug: bool=False) -> List[int]:
'Launch a master instance.\n\n Note:\n If you create a custom subclass of MasterLauncher which will not start the master instance on localhost\n then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you\n can benefit from the debug feature of `RuntimeTask.execute()`.\n\n Args:\n ports: Port where the DB should be started. If a list is given then the first port that is free in the\n `RuntimeGroup` will be used. The actual chosen port can be requested via the property `port`.\n timeout: Timeout (s) after which a MasterStartError is raised if the DB instance has not started yet. Defaults to\n 3 seconds.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then\n the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to\n `False`.\n\n Returns:\n List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.\n\n Raises:\n PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.\n NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.\n MasterStartError: If master was not started after the specified `timeout`.\n '
if debug:
self.log.debug('The debug flag has no effect in LocalMongoLauncher.')
_ports: Union[(List[int], int)] = (ports.copy() if isinstance(ports, list) else ports)
if (not isinstance(_ports, list)):
if (_utils.localhost_has_free_port(_ports) and self._group.has_free_port(_ports, exclude_hosts=Runtime.LOCALHOST)):
self._port = master_port = _ports
else:
raise PortInUseError(_ports, self._group)
else:
self._port = master_port = self._group.get_free_port(_ports)
_ports = _utils.get_remaining_ports(_ports, master_port)
self.log.debug(f'Starting MongoDB on localhost on port {str(master_port)} with dbpath `{self.dbpath}` and logfile `{self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME}`.')
return_code = os.system(self.get_mongod_start_cmd())
if (return_code != 0):
cause = f'Please verify that (1) MongoDB is installed, (2) the dbpath `{self.dbpath}` exists with the rights required by mongod and (3) that no other MongoDB instance is using and consequently locking the respective files (=> Init HyperoptCluster with another dbpath or manually stop the mongod process). See hyperopt docs in README for further details.'
raise MasterStartError('localhost', master_port, cause)
time.sleep(timeout)
if (not _utils.localhost_has_free_port(master_port)):
self.log.info(('MongoDB started on localhost on port ' + str(self._port)))
else:
self.log.debug(('MongoDB could NOT be started successfully on port ' + str(self._port)))
cause = f'The master port {master_port} is still free when checking after the timeout of {timeout} seconds.'
raise MasterStartError('localhost', master_port, cause)
self.log.info('Expose the MongoDB port in the RuntimeGroup.')
self._group.expose_port_to_runtimes(self._port)
return (_ports if isinstance(_ports, list) else [])<|docstring|>Launch a master instance.
Note:
If you create a custom subclass of MasterLauncher which will not start the master instance on localhost
then you should pass the debug flag on to `execute_task()` of the `RuntimeGroup` or `Runtime` so that you
can benefit from the debug feature of `RuntimeTask.execute()`.
Args:
ports: Port where the DB should be started. If a list is given then the first port that is free in the
`RuntimeGroup` will be used. The actual chosen port can be requested via the property `port`.
timeout: Timeout (s) after which a MasterStartError is raised if the DB instance has not started yet. Defaults to
3 seconds.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Returns:
List[int]: In case a port list was given the updated port list will be returned. Otherwise an empty list.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If a port list was given and none of the ports is actually free in the `RuntimeGroup`.
MasterStartError: If master was not started after the specified `timeout`.<|endoftext|> |
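A hedged usage sketch for the launcher above; `Runtime` and `RuntimeGroup` are lazycluster classes, and the hostnames, dbpath, and port list are hypothetical:

from lazycluster import Runtime, RuntimeGroup
from lazycluster.cluster.hyperopt_cluster import LocalMongoLauncher

group = RuntimeGroup([Runtime('host-1'), Runtime('host-2')])  # SSH-reachable hosts
launcher = LocalMongoLauncher(group)
launcher.dbpath = '/tmp/mongodb'  # must exist and be writable by mongod

# The first port free both on localhost and in the group is chosen;
# the actual port is afterwards available via the `port` property.
launcher.start(ports=[27017, 27018, 27019], timeout=3)
print('MongoDB is listening on port', launcher.port)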
5570a1cd7afdf371d801b0434203a922b24a7f382ade81474f15b0ce09e1a2e6 | def get_mongod_start_cmd(self) -> str:
'Get the shell command for starting mongod as a daemon process.\n\n Returns:\n str: The shell command.\n '
return f'mongod --fork --logpath={self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME} --dbpath={self.dbpath} --port={self._port}' | Get the shell command for starting mongod as a daemon process.
Returns:
str: The shell command. | src/lazycluster/cluster/hyperopt_cluster.py | get_mongod_start_cmd | prototypefund/lazycluster | 44 | python | def get_mongod_start_cmd(self) -> str:
'Get the shell command for starting mongod as a daemon process.\n\n Returns:\n str: The shell command.\n '
return f'mongod --fork --logpath={self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME} --dbpath={self.dbpath} --port={self._port}' | def get_mongod_start_cmd(self) -> str:
'Get the shell command for starting mongod as a daemon process.\n\n Returns:\n str: The shell command.\n '
return f'mongod --fork --logpath={self.dbpath}/{HyperoptCluster.MONGO_LOG_FILENAME} --dbpath={self.dbpath} --port={self._port}'<|docstring|>Get the shell command for starting mongod as a daemon process.
Returns:
str: The shell command.<|endoftext|> |
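For concreteness, the f-string above renders to a daemonized mongod invocation; the values below are hypothetical, and `logname` stands in for `HyperoptCluster.MONGO_LOG_FILENAME`, whose value is not shown in this excerpt:

dbpath, logname, port = '/tmp/mongodb', 'mongodb.log', 27017
cmd = f'mongod --fork --logpath={dbpath}/{logname} --dbpath={dbpath} --port={port}'
print(cmd)  # mongod --fork --logpath=/tmp/mongodb/mongodb.log --dbpath=/tmp/mongodb --port=27017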
28e1927f3492e5c25459504d9ed6577516e1accd5f03e81ed45fd8bbf05adf2b | def get_mongod_stop_cmd(self) -> str:
'Get the shell command for stopping the currently running mongod process.\n\n Returns:\n str: The shell command.\n '
return f'mongod --shutdown --dbpath={self.dbpath}' | Get the shell command for stopping the currently running mongod process.
Returns:
str: The shell command. | src/lazycluster/cluster/hyperopt_cluster.py | get_mongod_stop_cmd | prototypefund/lazycluster | 44 | python | def get_mongod_stop_cmd(self) -> str:
'Get the shell command for stopping the currently running mongod process.\n\n Returns:\n str: The shell command.\n '
return f'mongod --shutdown --dbpath={self.dbpath}' | def get_mongod_stop_cmd(self) -> str:
'Get the shell command for stopping the currently running mongod process.\n\n Returns:\n str: The shell command.\n '
return f'mongod --shutdown --dbpath={self.dbpath}'<|docstring|>Get the shell command for stopping the currently running mongod process.
Returns:
str: The shell command.<|endoftext|> |
dcc776077e803bdaa0ea9d08995b8e6884bcac48cbec131aa8d90af27031f8f2 | def cleanup(self) -> None:
'Release all resources.'
self.log.info('Stop the MongoDB ...')
self.log.debug('Cleaning up the LocalMasterLauncher ...')
return_code = os.system(self.get_mongod_stop_cmd())
if (return_code == 0):
self.log.info('MongoDB successfully stopped.')
else:
self.log.warning('MongoDB daemon could NOT be stopped.')
super().cleanup() | Release all resources. | src/lazycluster/cluster/hyperopt_cluster.py | cleanup | prototypefund/lazycluster | 44 | python | def cleanup(self) -> None:
self.log.info('Stop the MongoDB ...')
self.log.debug('Cleaning up the LocalMasterLauncher ...')
return_code = os.system(self.get_mongod_stop_cmd())
if (return_code == 0):
self.log.info('MongoDB successfully stopped.')
else:
self.log.warning('MongoDB daemon could NOT be stopped.')
super().cleanup() | def cleanup(self) -> None:
self.log.info('Stop the MongoDB ...')
self.log.debug('Cleaning up the LocalMasterLauncher ...')
return_code = os.system(self.get_mongod_stop_cmd())
if (return_code == 0):
self.log.info('MongoDB successfully stopped.')
else:
self.log.warning('MongoDB daemon could NOT be stopped.')
super().cleanup()<|docstring|>Release all resources.<|endoftext|> |
add0b8a3ef0756471cb7fbc958b45ab9201485dc57bb8610148c282aefb5fe8d | def __init__(self, runtime_group: RuntimeGroup, dbname: str, poll_interval: float):
'Initialization method.\n\n Args:\n runtime_group: The group where the workers will be started.\n dbname: The name of the MongoDB database.\n poll_interval: The poll interval of the hyperopt worker.\n\n Raises:\n ValueError: In case dbname is empty.\n '
super().__init__(runtime_group)
self._ports = None
if (not dbname):
raise ValueError('dbname must not be empty')
self._dbname = dbname
if (not poll_interval):
raise ValueError('poll_interval must not be empty')
self._poll_interval = poll_interval
self.log.debug('RoundRobinLauncher initialized.') | Initialization method.
Args:
runtime_group: The group where the workers will be started.
dbname: The name of the MongoDB database.
poll_interval: The poll interval of the hyperopt worker.
Raises:
ValueError: In case dbname is empty. | src/lazycluster/cluster/hyperopt_cluster.py | __init__ | prototypefund/lazycluster | 44 | python | def __init__(self, runtime_group: RuntimeGroup, dbname: str, poll_interval: float):
'Initialization method.\n\n Args:\n runtime_group: The group where the workers will be started.\n dbname: The name of the MongoDB database.\n poll_interval: The poll interval of the hyperopt worker.\n\n Raises:\n ValueError: In case dbname is empty.\n '
super().__init__(runtime_group)
self._ports = None
if (not dbname):
raise ValueError('dbname must not be empty')
self._dbname = dbname
if (not poll_interval):
raise ValueError('poll_interval must not be empty')
self._poll_interval = poll_interval
self.log.debug('RoundRobinLauncher initialized.') | def __init__(self, runtime_group: RuntimeGroup, dbname: str, poll_interval: float):
'Initialization method.\n\n Args:\n runtime_group: The group where the workers will be started.\n dbname: The name of the MongoDB database.\n poll_interval: The poll interval of the hyperopt worker.\n\n Raises:\n ValueError: In case dbname is empty.\n '
super().__init__(runtime_group)
self._ports = None
if (not dbname):
raise ValueError('dbname must not be empty')
self._dbname = dbname
if (not poll_interval):
raise ValueError('poll_interval must not be empty')
self._poll_interval = poll_interval
self.log.debug('RoundRobinLauncher initialized.')<|docstring|>Initialization method.
Args:
runtime_group: The group where the workers will be started.
dbname: The name of the MongoDB database.
poll_interval: The poll interval of the hyperopt worker.
Raises:
ValueError: In case dbname is empty.<|endoftext|> |
f56555c7b8c59deeed396517fbff951f473a1abd3ebca5c14005ae3f2b1a915a | def start(self, worker_count: int, master_port: int, ports: List[int]=None, debug: bool=True) -> List[int]:
'Launches the worker instances in the `RuntimeGroup`.\n\n Args:\n worker_count: The number of worker instances to be started in the group.\n master_port: The port of the master instance.\n ports: Not used here; only present to adhere to the interface defined by the\n WorkerLauncher class.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then\n the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to\n `False`.\n\n Returns:\n List[int]: The updated port list after starting the workers, i.e. the used ones were removed.\n '
hosts = self._group.hosts
runtimes = self._group.runtimes
for worker_index in range(worker_count):
runtime_index = ((self._group.runtime_count + worker_index) % self._group.runtime_count)
host = hosts[runtime_index]
assert (host == runtimes[runtime_index].host)
self.log.debug(f'Launch Hyperopt worker with index {worker_index} on Runtime {host}')
self._launch_single_worker(host, worker_index, master_port, debug)
return (ports if isinstance(ports, list) else []) | Launches the worker instances in the `RuntimeGroup`.
Args:
worker_count: The number of worker instances to be started in the group.
master_port: The port of the master instance.
ports: Not used here; only present to adhere to the interface defined by the
WorkerLauncher class.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Returns:
List[int]: The updated port list after starting the workers, i.e. the used ones were removed. | src/lazycluster/cluster/hyperopt_cluster.py | start | prototypefund/lazycluster | 44 | python | def start(self, worker_count: int, master_port: int, ports: List[int]=None, debug: bool=True) -> List[int]:
'Launches the worker instances in the `RuntimeGroup`.\n\n Args:\n worker_count: The number of worker instances to be started in the group.\n master_port: The port of the master instance.\n ports: Not used here; only present to adhere to the interface defined by the\n WorkerLauncher class.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then\n the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to\n `False`.\n\n Returns:\n List[int]: The updated port list after starting the workers, i.e. the used ones were removed.\n '
hosts = self._group.hosts
runtimes = self._group.runtimes
for worker_index in range(worker_count):
runtime_index = ((self._group.runtime_count + worker_index) % self._group.runtime_count)
host = hosts[runtime_index]
assert (host == runtimes[runtime_index].host)
self.log.debug(f'Launch Hyperopt worker with index {worker_index} on Runtime {host}')
self._launch_single_worker(host, worker_index, master_port, debug)
return (ports if isinstance(ports, list) else []) | def start(self, worker_count: int, master_port: int, ports: List[int]=None, debug: bool=True) -> List[int]:
'Launches the worker instances in the `RuntimeGroup`.\n\n Args:\n worker_count: The number of worker instances to be started in the group.\n master_port: The port of the master instance.\n ports: Not used here; only present to adhere to the interface defined by the\n WorkerLauncher class.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then\n the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to\n `False`.\n\n Returns:\n List[int]: The updated port list after starting the workers, i.e. the used ones were removed.\n '
hosts = self._group.hosts
runtimes = self._group.runtimes
for worker_index in range(worker_count):
runtime_index = ((self._group.runtime_count + worker_index) % self._group.runtime_count)
host = hosts[runtime_index]
assert (host == runtimes[runtime_index].host)
self.log.debug(f'Launch Hyperopt worker with index {worker_index} on Runtime {host}')
self._launch_single_worker(host, worker_index, master_port, debug)
return (ports if isinstance(ports, list) else [])<|docstring|>Launches the worker instances in the `RuntimeGroup`.
Args:
worker_count: The number of worker instances to be started in the group.
master_port: The port of the master instance.
ports: Not used here; only present to adhere to the interface defined by the
WorkerLauncher class.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. If `False`, then
the stdout/stderr will be added to the Python logger with level debug after each `RuntimeTask` step. Defaults to
`False`.
Returns:
List[int]: The updated port list after starting the workers, i.e. the used ones were removed.<|endoftext|> |
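The index arithmetic above, `(runtime_count + worker_index) % runtime_count`, reduces to plain round-robin, since adding `runtime_count` before taking the modulus does not change the residue. A small self-contained trace with hypothetical counts:

runtime_count = 3
for worker_index in range(5):
    runtime_index = (runtime_count + worker_index) % runtime_count
    assert runtime_index == worker_index % runtime_count
    print(f'worker {worker_index} -> runtime {runtime_index}')
# worker 0 -> runtime 0, 1 -> 1, 2 -> 2, 3 -> 0, 4 -> 1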
19416986d5cd4e8b7aaf5be7ec2bcc95f784d9a96cdf6fbb362bf4bf48eefdee | def _launch_single_worker(self, host: str, worker_index: int, master_port: int, debug: bool) -> None:
'Launch a single worker instance in a `Runtime` in the `RuntimeGroup`.'
task = RuntimeTask(('launch-hyperopt-worker-' + str(worker_index)))
task.run_command(self._get_launch_command(master_port, self._dbname, self._poll_interval))
self._group.execute_task(task, host, omit_on_join=True, debug=debug) | Launch a single worker instance in a `Runtime` in the `RuntimeGroup`. | src/lazycluster/cluster/hyperopt_cluster.py | _launch_single_worker | prototypefund/lazycluster | 44 | python | def _launch_single_worker(self, host: str, worker_index: int, master_port: int, debug: bool) -> None:
task = RuntimeTask(('launch-hyperopt-worker-' + str(worker_index)))
task.run_command(self._get_launch_command(master_port, self._dbname, self._poll_interval))
self._group.execute_task(task, host, omit_on_join=True, debug=debug) | def _launch_single_worker(self, host: str, worker_index: int, master_port: int, debug: bool) -> None:
task = RuntimeTask(('launch-hyperopt-worker-' + str(worker_index)))
task.run_command(self._get_launch_command(master_port, self._dbname, self._poll_interval))
self._group.execute_task(task, host, omit_on_join=True, debug=debug)<|docstring|>Launch a single worker instance in a `Runtime` in the `RuntimeGroup`.<|endoftext|> |
57ca66111a27a97f89b88bc967aff3245e720cdb516e16028972c8e70c01557b | @classmethod
def _get_launch_command(cls, master_port: int, dbname: str, poll_interval: float=0.1) -> str:
'Get the shell command for starting a worker instance.\n\n Returns:\n str: The launch command.\n '
return f'hyperopt-mongo-worker --mongo=localhost:{str(master_port)}/{dbname} --poll-interval={str(poll_interval)}' | Get the shell command for starting a worker instance.
Returns:
str: The launch command. | src/lazycluster/cluster/hyperopt_cluster.py | _get_launch_command | prototypefund/lazycluster | 44 | python | @classmethod
def _get_launch_command(cls, master_port: int, dbname: str, poll_interval: float=0.1) -> str:
'Get the shell command for starting a worker instance.\n\n Returns:\n str: The launch command.\n '
return f'hyperopt-mongo-worker --mongo=localhost:{str(master_port)}/{dbname} --poll-interval={str(poll_interval)}' | @classmethod
def _get_launch_command(cls, master_port: int, dbname: str, poll_interval: float=0.1) -> str:
'Get the shell command for starting a worker instance.\n\n Returns:\n str: The launch command.\n '
return f'hyperopt-mongo-worker --mongo=localhost:{str(master_port)}/{dbname} --poll-interval={str(poll_interval)}'<|docstring|>Get the shell command for starting a worker instance.
Returns:
str: The launch command.<|endoftext|> |
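Rendered with hypothetical values (master port 27017, database name 'hyperopt', default poll interval), the command above becomes:

master_port, dbname, poll_interval = 27017, 'hyperopt', 0.1
cmd = f'hyperopt-mongo-worker --mongo=localhost:{master_port}/{dbname} --poll-interval={poll_interval}'
print(cmd)  # hyperopt-mongo-worker --mongo=localhost:27017/hyperopt --poll-interval=0.1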
38ead3314edd9a18d8cec37f38b62eec5e3705875f73fbbc08a202085e96cef7 | def cleanup(self) -> None:
'Release all resources.'
self.log.info('Cleanup the RoundRobinLauncher ...')
super().cleanup() | Release all resources. | src/lazycluster/cluster/hyperopt_cluster.py | cleanup | prototypefund/lazycluster | 44 | python | def cleanup(self) -> None:
self.log.info('Cleanup the RoundRobinLauncher ...')
super().cleanup() | def cleanup(self) -> None:
self.log.info('Cleanup the RoundRobinLauncher ...')
super().cleanup()<|docstring|>Release all resources.<|endoftext|> |
3f56806686a5cdae57913b35abfad6fa866ee8675039fe5bba715f8b924f6093 | def __init__(self, runtime_group: RuntimeGroup, mongo_launcher: Optional[MongoLauncher]=None, worker_launcher: Optional[WorkerLauncher]=None, dbpath: Optional[str]=None, dbname: str='hyperopt', worker_poll_intervall: float=0.1):
'Initialization method.\n\n Args:\n runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the entities.\n mongo_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which\n implements the strategy for launching the master instances in the cluster. If None, then\n `LocalMasterLauncher` is used.\n worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which\n implements the strategy for launching the worker instances. If None, then\n `RoundRobinLauncher` is used.\n dbpath: The directory where the db files will be kept. Defaults to a `mongodb` directory inside the\n `utils.Environment.main_directory`.\n dbname: The name of the database to be used for experiments. See MongoTrials url scheme in hyperopt\n documentation for more details. Defaults to `hyperopt`.\n worker_poll_intervall: The poll interval of the hyperopt worker. Defaults to `0.1`.\n\n Raises:\n PermissionError: If the `dbpath` does not exist and could not be created due to lack of permissions.\n '
super().__init__(runtime_group)
self._master_launcher = (mongo_launcher or LocalMongoLauncher(runtime_group))
if (not dbpath):
self._master_launcher.dbpath = os.path.join(Environment.main_directory, 'mongodb')
assert self._master_launcher.dbpath
try:
os.makedirs(self._master_launcher.dbpath)
except FileExistsError:
pass
else:
self._master_launcher.dbpath = dbpath
self._dbname = dbname
self._worker_launcher = (worker_launcher if worker_launcher else RoundRobinLauncher(runtime_group, dbname, worker_poll_intervall))
self.log.debug('HyperoptCluster initialized.') | Initialization method.
Args:
runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the entities.
mongo_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which
implements the strategy for launching the master instances in the cluster. If None, then
`LocalMasterLauncher` is used.
worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which
implements the strategy for launching the worker instances. If None, then
`RoundRobinLauncher` is used.
dbpath: The directory where the db files will be kept. Defaults to a `mongodb` directory inside the
`utils.Environment.main_directory`.
dbname: The name of the database to be used for experiments. See MongoTrials url scheme in hyperopt
documentation for more details. Defaults to `hyperopt`.
worker_poll_intervall: The poll interval of the hyperopt worker. Defaults to `0.1`.
Raises:
PermissionError: If the `dbpath` does not exist and could not be created due to lack of permissions. | src/lazycluster/cluster/hyperopt_cluster.py | __init__ | prototypefund/lazycluster | 44 | python | def __init__(self, runtime_group: RuntimeGroup, mongo_launcher: Optional[MongoLauncher]=None, worker_launcher: Optional[WorkerLauncher]=None, dbpath: Optional[str]=None, dbname: str='hyperopt', worker_poll_intervall: float=0.1):
'Initialization method.\n\n Args:\n runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the entities.\n mongo_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which\n implements the strategy for launching the master instances in the cluster. If None, then\n `LocalMasterLauncher` is used.\n worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which\n implements the strategy for launching the worker instances. If None, then\n `RoundRobinLauncher` is used.\n dbpath: The directory where the db files will be kept. Defaults to a `mongodb` directory inside the\n `utils.Environment.main_directory`.\n dbname: The name of the database to be used for experiments. See MongoTrials url scheme in hyperopt\n documentation for more details. Defaults to `hyperopt`.\n worker_poll_intervall: The poll interval of the hyperopt worker. Defaults to `0.1`.\n\n Raises:\n PermissionError: If the `dbpath` does not exist and could not be created due to lack of permissions.\n '
super().__init__(runtime_group)
self._master_launcher = (mongo_launcher or LocalMongoLauncher(runtime_group))
if (not dbpath):
self._master_launcher.dbpath = os.path.join(Environment.main_directory, 'mongodb')
assert self._master_launcher.dbpath
try:
os.makedirs(self._master_launcher.dbpath)
except FileExistsError:
pass
else:
self._master_launcher.dbpath = dbpath
self._dbname = dbname
self._worker_launcher = (worker_launcher if worker_launcher else RoundRobinLauncher(runtime_group, dbname, worker_poll_intervall))
self.log.debug('HyperoptCluster initialized.') | def __init__(self, runtime_group: RuntimeGroup, mongo_launcher: Optional[MongoLauncher]=None, worker_launcher: Optional[WorkerLauncher]=None, dbpath: Optional[str]=None, dbname: str='hyperopt', worker_poll_intervall: float=0.1):
'Initialization method.\n\n Args:\n runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the entities.\n mongo_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which\n implements the strategy for launching the master instances in the cluster. If None, then\n `LocalMasterLauncher` is used.\n worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which\n implements the strategy for launching the worker instances. If None, then\n `RoundRobinLauncher` is used.\n dbpath: The directory where the db files will be kept. Defaults to a `mongodb` directory inside the\n `utils.Environment.main_directory`.\n dbname: The name of the database to be used for experiments. See MongoTrials url scheme in hyperopt\n documentation for more details. Defaults to `hyperopt`.\n worker_poll_intervall: The poll interval of the hyperopt worker. Defaults to `0.1`.\n\n Raises:\n PermissionError: If the `dbpath` does not exist and could not be created due to lack of permissions.\n '
super().__init__(runtime_group)
self._master_launcher = (mongo_launcher or LocalMongoLauncher(runtime_group))
if (not dbpath):
self._master_launcher.dbpath = os.path.join(Environment.main_directory, 'mongodb')
assert self._master_launcher.dbpath
try:
os.makedirs(self._master_launcher.dbpath)
except FileExistsError:
pass
else:
self._master_launcher.dbpath = dbpath
self._dbname = dbname
self._worker_launcher = (worker_launcher if worker_launcher else RoundRobinLauncher(runtime_group, dbname, worker_poll_intervall))
self.log.debug('HyperoptCluster initialized.')<|docstring|>Initialization method.
Args:
runtime_group: The `RuntimeGroup` contains all `Runtimes` which can be used for starting the entities.
mongo_launcher: Optionally, an instance implementing the `MasterLauncher` interface can be given, which
implements the strategy for launching the master instances in the cluster. If None, then
`LocalMasterLauncher` is used.
worker_launcher: Optionally, an instance implementing the `WorkerLauncher` interface can be given, which
implements the strategy for launching the worker instances. If None, then
`RoundRobinLauncher` is used.
dbpath: The directory where the db files will be kept. Defaults to a `mongodb` directory inside the
`utils.Environment.main_directory`.
dbname: The name of the database to be used for experiments. See MongoTrials url scheme in hyperopt
documentation for more details. Defaults to `hyperopt`.
worker_poll_intervall: The poll interval of the hyperopt worker. Defaults to `0.1`.
Raises:
PermissionError: If the `dbpath` does not exist and could not be created due to lack of permissions.<|endoftext|>
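A hedged construction sketch for the cluster above; the hostnames are placeholders, and the `start()` convenience call is assumed from lazycluster's MasterWorkerCluster base class (it starts the master first and then the workers):

from lazycluster import Runtime, RuntimeGroup
from lazycluster.cluster.hyperopt_cluster import HyperoptCluster

group = RuntimeGroup([Runtime('host-1'), Runtime('host-2')])
cluster = HyperoptCluster(group, dbname='hyperopt')
cluster.start()  # assumed: launches MongoDB locally, then one hyperopt worker per runtime
print(cluster.mongo_trial_url)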
416be4f67ca40aa0a88504de3269962f1778a51c8a231d64a6146d0cfab695af | @property
def mongo_trial_url(self) -> str:
'The MongoDB url indicating what mongod process and which database to use.\n\n Note:\n The format is the one required by the hyperopt MongoTrials object.\n\n Returns:\n str: URL string.\n '
if (not self.master_port):
self.log.warning('HyperoptCluster.mongo_trial_url was requested although the master_port is not yet set.')
return f'mongo://localhost:{self.master_port}/{self.dbname}/jobs' | The MongoDB url indicating what mongod process and which database to use.
Note:
The format is the one required by the hyperopt MongoTrials object.
Returns:
str: URL string. | src/lazycluster/cluster/hyperopt_cluster.py | mongo_trial_url | prototypefund/lazycluster | 44 | python | @property
def mongo_trial_url(self) -> str:
'The MongoDB url indicating what mongod process and which database to use.\n\n Note:\n The format is the one required by the hyperopt MongoTrials object.\n\n Returns:\n str: URL string.\n '
if (not self.master_port):
self.log.warning('HyperoptCluster.mongo_trial_url was requested although the master_port is not yet set.')
return f'mongo://localhost:{self.master_port}/{self.dbname}/jobs' | @property
def mongo_trial_url(self) -> str:
'The MongoDB url indicating what mongod process and which database to use.\n\n Note:\n The format is the one required by the hyperopt MongoTrials object.\n\n Returns:\n str: URL string.\n '
if (not self.master_port):
self.log.warning('HyperoptCluster.mongo_trial_url was requested although the master_port is not yet set.')
return f'mongo://localhost:{self.master_port}/{self.dbname}/jobs'<|docstring|>The MongoDB url indicating what mongod process and which database to use.
Note:
The format is the one required by the hyperopt MongoTrials object.
Returns:
str: URL string.<|endoftext|> |
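The trial URL above is shaped for hyperopt's MongoTrials; a sketch of the consuming side, assuming a started `cluster` as above (the exp_key is arbitrary, and the objective must be importable by the remote workers, so a module-level function is used rather than a lambda):

from hyperopt import fmin, hp, tpe
from hyperopt.mongoexp import MongoTrials

def objective(x):
    return x ** 2

trials = MongoTrials(cluster.mongo_trial_url, exp_key='demo_exp')
best = fmin(fn=objective, space=hp.uniform('x', -10, 10),
            algo=tpe.suggest, max_evals=50, trials=trials)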
370b9640a65d2da63f069e130791b039d5f515bc75294adc340181a1c4044d3c | @property
def mongo_url(self) -> str:
'The MongoDB url indicating what mongod process and which database to use.\n\n Note:\n The format is `mongo://host:port/dbname`.\n\n Returns:\n str: URL string.\n '
if (not self.master_port):
self.log.warning('HyperoptCluster.mongo_url was requested although the master_port is not yet set.')
return f'mongo://localhost:{self.master_port}/{self.dbname}' | The MongoDB url indicating what mongod process and which database to use.
Note:
The format is `mongo://host:port/dbname`.
Returns:
str: URL string. | src/lazycluster/cluster/hyperopt_cluster.py | mongo_url | prototypefund/lazycluster | 44 | python | @property
def mongo_url(self) -> str:
'The MongoDB url indicating what mongod process and which database to use.\n\n Note:\n The format is `mongo://host:port/dbname`.\n\n Returns:\n str: URL string.\n '
if (not self.master_port):
self.log.warning('HyperoptCluster.mongo_url was requested although the master_port is not yet set.')
return f'mongo://localhost:{self.master_port}/{self.dbname}' | @property
def mongo_url(self) -> str:
'The MongoDB url indicating what mongod process and which database to use.\n\n Note:\n The format is `mongo://host:port/dbname`.\n\n Returns:\n str: URL string.\n '
if (not self.master_port):
self.log.warning('HyperoptCluster.mongo_url was requested although the master_port is not yet set.')
return f'mongo://localhost:{self.master_port}/{self.dbname}'<|docstring|>The MongoDB url indicating what mongod process and which database to use.
Note:
The format is `mongo://host:port/dbname`.
Returns:
str: URL string.<|endoftext|> |
582f53ecf9947e8b01858fd17f2f804b7b0d92653117e09362abf545355ab04d | @property
def dbname(self) -> str:
'The name of the MongoDB database to be used for experiments.'
return self._dbname | The name of the MongoDB database to be used for experiments. | src/lazycluster/cluster/hyperopt_cluster.py | dbname | prototypefund/lazycluster | 44 | python | @property
def dbname(self) -> str:
return self._dbname | @property
def dbname(self) -> str:
return self._dbname<|docstring|>The name of the MongoDB database to be used for experiments.<|endoftext|> |
ecafe82af23035ae92828c50784d06f7809888d1518b5ecdc53883330238e21d | def start_master(self, master_port: Optional[int]=None, timeout: int=3, debug: bool=False) -> None:
'Start the master instance.\n\n Note:\n How the master is actually started is determined by the actual `MasterLauncher` implementation. Another\n implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster\n class.\n\n Args:\n master_port: Port of the master instance. Defaults to self.DEFAULT_MASTER_PORT, but another one is chosen if\n the port is not free within the group. The actual chosen port can be requested via\n self.master_port.\n timeout: Timeout (s) after which a MasterStartError is raised if the master instance has not started yet.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect\n if the master instance is started locally, which is what default MasterLauncher implementations usually do.\n\n Raises:\n PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.\n NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.\n MasterStartError: If master was not started after the specified `timeout`.\n '
super().start_master(master_port, timeout)
self._group.add_env_variables({self.ENV_NAME_MONGO_URL: self.mongo_trial_url}) | Start the master instance.
Note:
How the master is actually started is determined by the actual `MasterLauncher` implementation. Another
implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster
class.
Args:
master_port: Port of the master instance. Defaults to self.DEFAULT_MASTER_PORT, but another one is chosen if
the port is not free within the group. The actual chosen port can be requested via
self.master_port.
timeout: Timeout (s) after which a MasterStartError is raised if the master instance has not started yet.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect
if the master instance is started locally, which is what default MasterLauncher implementations usually do.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.
MasterStartError: If master was not started after the specified `timeout`. | src/lazycluster/cluster/hyperopt_cluster.py | start_master | prototypefund/lazycluster | 44 | python | def start_master(self, master_port: Optional[int]=None, timeout: int=3, debug: bool=False) -> None:
'Start the master instance.\n\n Note:\n How the master is actually started is determined by the actual `MasterLauncher` implementation. Another\n implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster\n class.\n\n Args:\n master_port: Port of the master instance. Defaults to self.DEFAULT_MASTER_PORT, but another one is chosen if\n the port is not free within the group. The actual chosen port can be requested via\n self.master_port.\n timeout: Timeout (s) after which a MasterStartError is raised if the master instance has not started yet.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect\n if the master instance is started locally, which is what default MasterLauncher implementations usually do.\n\n Raises:\n PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.\n NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.\n MasterStartError: If master was not started after the specified `timeout`.\n '
super().start_master(master_port, timeout)
self._group.add_env_variables({self.ENV_NAME_MONGO_URL: self.mongo_trial_url}) | def start_master(self, master_port: Optional[int]=None, timeout: int=3, debug: bool=False) -> None:
'Start the master instance.\n\n Note:\n How the master is actually started is determined by the actual `MasterLauncher` implementation. Another\n implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster\n class.\n\n Args:\n master_port: Port of the master instance. Defaults to self.DEFAULT_MASTER_PORT, but another one is chosen if\n the port is not free within the group. The actual chosen port can be requested via\n self.master_port.\n timeout: Timeout (s) after which a MasterStartError is raised if the master instance has not started yet.\n debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect\n if the master instance is started locally, which is what default MasterLauncher implementations usually do.\n\n Raises:\n PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.\n NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.\n MasterStartError: If master was not started after the specified `timeout`.\n '
super().start_master(master_port, timeout)
self._group.add_env_variables({self.ENV_NAME_MONGO_URL: self.mongo_trial_url})<|docstring|>Start the master instance.
Note:
How the master is actually started is determined by the actual `MasterLauncher` implementation. Another
implementation adhering to the `MasterLauncher` interface can be provided in the constructor of the cluster
class.
Args:
master_port: Port of the master instance. Defaults to self.DEFAULT_MASTER_PORT, but another one is chosen if
the port is not free within the group. The actual chosen port can be requested via
self.master_port.
timeout: Timeout (s) after which a MasterStartError is raised if the master instance has not started yet.
debug: If `True`, stdout/stderr from the runtime will be printed to stdout of localhost. Has no effect
if the master instance is started locally, which is what default MasterLauncher implementations usually do.
Raises:
PortInUseError: If a single port is given and it is not free in the `RuntimeGroup`.
NoPortsLeftError: If there are no free ports left in the port list for instantiating the master.
MasterStartError: If master was not started after the specified `timeout`.<|endoftext|> |
041562261bfcdc7657dbf87d829ef0df3cee891ef2d548c79f59040aa3db2bed | def cleanup(self) -> None:
'Release all resources.'
self.log.info('Shutting down the HyperoptCluster...')
super().cleanup() | Release all resources. | src/lazycluster/cluster/hyperopt_cluster.py | cleanup | prototypefund/lazycluster | 44 | python | def cleanup(self) -> None:
self.log.info('Shutting down the HyperoptCluster...')
super().cleanup() | def cleanup(self) -> None:
self.log.info('Shutting down the HyperoptCluster...')
super().cleanup()<|docstring|>Release all resources.<|endoftext|> |
435730aa88fe1fdfdab1c6aa1a95c84d8fff72140ba3f2b9996fccef19ddab78 | def html2markdown(html):
'Converts `html` to Markdown-formatted text\n '
markdown_text = pypandoc.convert_text(html, 'markdown_strict', format='html')
return markdown_text | Converts `html` to Markdown-formatted text | utils/text/converters.py | html2markdown | aweandreverence/django-htk | 206 | python | def html2markdown(html):
'\n '
markdown_text = pypandoc.convert_text(html, 'markdown_strict', format='html')
return markdown_text | def html2markdown(html):
'\n '
markdown_text = pypandoc.convert_text(html, 'markdown_strict', format='html')
return markdown_text<|docstring|>Converts `html` to Markdown-formatted text<|endoftext|> |
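A quick illustration of the pandoc round-trip above; it requires the pandoc binary to be installed, and the exact output can vary slightly across pandoc versions:

import pypandoc

html = '<h1>Title</h1><p>Some <strong>bold</strong> text.</p>'
print(pypandoc.convert_text(html, 'markdown_strict', format='html'))
# Typically:
# # Title
#
# Some **bold** text.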
e5116d825f04d81d592c1b5885a9543f11251893927846bd96c44318b7f4647a | def markdown2slack(markdown_text):
'Converts Markdown-formatted text to Slack-formatted text\n '
markdown_lines = markdown_text.split('\n')
slack_lines = []
for line in markdown_lines:
line = line.strip()
line = re.sub('\\*\\*(.+?)\\*\\*', '<b>\\1<b>', line)
line = re.sub('(\\*(.+?)\\*)', '_\\2_', line)
line = re.sub('<b>(.+?)<b>', '*\\1*', line)
line = re.sub('^#+(.*)$', '*\\1*', line)
slack_lines.append(line)
slack_text = '\n'.join(slack_lines)
return slack_text | Converts Markdown-formatted text to Slack-formatted text | utils/text/converters.py | markdown2slack | aweandreverence/django-htk | 206 | python | def markdown2slack(markdown_text):
'\n '
markdown_lines = markdown_text.split('\n')
slack_lines = []
for line in markdown_lines:
line = line.strip()
line = re.sub('\\*\\*(.+?)\\*\\*', '<b>\\1<b>', line)
line = re.sub('(\\*(.+?)\\*)', '_\\2_', line)
line = re.sub('<b>(.+?)<b>', '*\\1*', line)
line = re.sub('^#+(.*)$', '*\\1*', line)
slack_lines.append(line)
slack_text = '\n'.join(slack_lines)
return slack_text | def markdown2slack(markdown_text):
'\n '
markdown_lines = markdown_text.split('\n')
slack_lines = []
for line in markdown_lines:
line = line.strip()
line = re.sub('\\*\\*(.+?)\\*\\*', '<b>\\1<b>', line)
line = re.sub('(\\*(.+?)\\*)', '_\\2_', line)
line = re.sub('<b>(.+?)<b>', '*\\1*', line)
line = re.sub('^#+(.*)$', '*\\1*', line)
slack_lines.append(line)
slack_text = '\n'.join(slack_lines)
return slack_text<|docstring|>Converts Markdown-formatted text to Slack-formatted text<|endoftext|> |
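Tracing the substitution pipeline above on a sample line: bold runs are first shielded behind a <b> placeholder so the italic pass cannot consume their asterisks, then converted to Slack bold at the end.

import re

line = '**bold** and *italic*'
line = re.sub('\\*\\*(.+?)\\*\\*', '<b>\\1<b>', line)  # -> <b>bold<b> and *italic*
line = re.sub('(\\*(.+?)\\*)', '_\\2_', line)          # -> <b>bold<b> and _italic_
line = re.sub('<b>(.+?)<b>', '*\\1*', line)            # -> *bold* and _italic_
print(line)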
98d824085d17c4c7feac93cce3a8323b140a45bfd05f8680229e4898c48578cc | def check_input(args):
'Checks whether to read from stdin/file and validates user input/options.\n '
option = ''
fh = sys.stdin
if (not len(args)):
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif (len(args) == 1):
if args[0].startswith('-'):
option = args[0][1:]
if sys.stdin.isatty():
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if (not os.path.isfile(args[0])):
emsg = "ERROR!! File not found or not readable: '{}'\n"
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif (len(args) == 2):
if (not args[0].startswith('-')):
emsg = "ERROR! First argument is not an option: '{}'\n"
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if (not os.path.isfile(args[1])):
emsg = "ERROR!! File not found or not readable: '{}'\n"
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else:
sys.stderr.write(__doc__)
sys.exit(1)
option_set = set([o.upper().strip() for o in option.split(',') if o.strip()])
if (not option_set):
emsg = 'ERROR!! You must provide at least one segment identifier\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
for seg_id in option_set:
if (len(seg_id) > 4):
emsg = "ERROR!! Segment identifier name is invalid: '{}'\n"
sys.stderr.write(emsg.format(seg_id))
sys.stderr.write(__doc__)
sys.exit(1)
return (fh, option_set) | Checks whether to read from stdin/file and validates user input/options. | pdbtools/pdb_selseg.py | check_input | andrewsb8/pdb-tools | 192 | python | def check_input(args):
'\n '
option = ''
fh = sys.stdin
if (not len(args)):
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif (len(args) == 1):
if args[0].startswith('-'):
option = args[0][1:]
if sys.stdin.isatty():
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if (not os.path.isfile(args[0])):
emsg = "ERROR!! File not found or not readable: '{}'\n"
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif (len(args) == 2):
if (not args[0].startswith('-')):
emsg = "ERROR! First argument is not an option: '{}'\n"
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if (not os.path.isfile(args[1])):
emsg = "ERROR!! File not found or not readable: '{}'\n"
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else:
sys.stderr.write(__doc__)
sys.exit(1)
option_set = set([o.upper().strip() for o in option.split(',') if o.strip()])
if (not option_set):
emsg = 'ERROR!! You must provide at least one segment identifier\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
for seg_id in option_set:
if (len(seg_id) > 4):
emsg = "ERROR!! Segment identifier name is invalid: '{}'\n"
sys.stderr.write(emsg.format(seg_id))
sys.stderr.write(__doc__)
sys.exit(1)
return (fh, option_set) | def check_input(args):
'\n '
option = ''
fh = sys.stdin
if (not len(args)):
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif (len(args) == 1):
if args[0].startswith('-'):
option = args[0][1:]
if sys.stdin.isatty():
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if (not os.path.isfile(args[0])):
emsg = "ERROR!! File not found or not readable: '{}'\n"
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif (len(args) == 2):
if (not args[0].startswith('-')):
emsg = "ERROR! First argument is not an option: '{}'\n"
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if (not os.path.isfile(args[1])):
emsg = "ERROR!! File not found or not readable: '{}'\n"
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else:
sys.stderr.write(__doc__)
sys.exit(1)
option_set = set([o.upper().strip() for o in option.split(',') if o.strip()])
if (not option_set):
emsg = 'ERROR!! You must provide at least one segment identifier\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
for seg_id in option_set:
if (len(seg_id) > 4):
emsg = "ERROR!! Segment identifier name is invalid: '{}'\n"
sys.stderr.write(emsg.format(seg_id))
sys.stderr.write(__doc__)
sys.exit(1)
return (fh, option_set)<|docstring|>Checks whether to read from stdin/file and validates user input/options.<|endoftext|> |
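The option parsing above normalizes a comma-separated list of segment identifiers, uppercasing and trimming each entry and dropping empties; a small trace with a hypothetical input string:

option = 'a, b1 ,MEMB'
option_set = set([o.upper().strip() for o in option.split(',') if o.strip()])
print(option_set)  # {'A', 'B1', 'MEMB'}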
bc8c167e7e07c1060a9dac07e5cd5bc17b3dfa59c38e16dc3b8c6a8eb497b9cb | def run(fhandle, segment_set):
'\n Filter the PDB file for specific segment identifiers.\n\n This function is a generator.\n\n Parameters\n ----------\n fhandle : a line-by-line iterator of the original PDB file.\n\n segment_set : set, list, or tuple\n The set of segment identifiers.\n\n Yields\n ------\n str (line-by-line)\n The lines only from the segment set.\n '
records = ('ATOM', 'HETATM', 'ANISOU')
for line in fhandle:
if line.startswith(records):
if (line[72:76].strip() not in segment_set):
continue
(yield line) | Filter the PDB file for specific segment identifiers.
This function is a generator.
Parameters
----------
fhandle : a line-by-line iterator of the original PDB file.
segment_set : set, list, or tuple
The set of segment identifiers.
Yields
------
str (line-by-line)
The lines only from the segment set. | pdbtools/pdb_selseg.py | run | andrewsb8/pdb-tools | 192 | python | def run(fhandle, segment_set):
'\n Filter the PDB file for specific segment identifiers.\n\n This function is a generator.\n\n Parameters\n ----------\n fhandle : a line-by-line iterator of the original PDB file.\n\n segment_set : set, list, or tuple\n The set of segment identifiers.\n\n Yields\n ------\n str (line-by-line)\n The lines only from the segment set.\n '
records = ('ATOM', 'HETATM', 'ANISOU')
for line in fhandle:
if line.startswith(records):
if (line[72:76].strip() not in segment_set):
continue
(yield line) | def run(fhandle, segment_set):
'\n Filter the PDB file for specific segment identifiers.\n\n This function is a generator.\n\n Parameters\n ----------\n fhandle : a line-by-line iterator of the original PDB file.\n\n segment_set : set, list, or tuple\n The set of segment identifiers.\n\n Yields\n ------\n str (line-by-line)\n The lines only from the segment set.\n '
records = ('ATOM', 'HETATM', 'ANISOU')
for line in fhandle:
if line.startswith(records):
if (line[72:76].strip() not in segment_set):
continue
(yield line)<|docstring|>Filter the PDB file for specific segment identifiers.
This function is a generator.
Parameters
----------
fhandle : a line-by-line iterator of the original PDB file.
segment_set : set, list, or tuple
The set of segment identifiers.
Yields
------
str (line-by-line)
The lines only from the segment set.<|endoftext|> |
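A hedged driver for the generator above (the file name and segment identifier are assumptions):

    import sys

    with open('input.pdb') as fh:
        for line in run(fh, {'PROA'}):   # keep only lines from segment PROA
            sys.stdout.write(line)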
9c065f67ca15415eb67f1409bbe000fc762cc08ca1efe1019f5a51222e09a9de | def abspath(myPath):
' Get absolute path to resource, works for dev and for PyInstaller '
import os, sys
try:
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath) | Get absolute path to resource, works for dev and for PyInstaller | gputools/core/ocltypes.py | abspath | VolkerH/gputools | 0 | python | def abspath(myPath):
' '
import os, sys
try:
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath) | def abspath(myPath):
' '
import os, sys
try:
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath)<|docstring|>Get absolute path to resource, works for dev and for PyInstaller<|endoftext|> |
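A brief illustration of the two branches, reusing the kernel path that appears later in this module:

    # In a PyInstaller bundle, sys._MEIPASS exists and the basename is
    # resolved inside the unpacked bundle; in a dev checkout the path is
    # resolved relative to this module's directory instead.
    kernel_path = abspath('kernels/copy_resampled.cl')
    with open(kernel_path) as f:
        source = f.read()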
f2f3cbe2c92e8ca6fd299143e7973707cc48df42b6265db90a64d609a54fe405 | def _wrap_OCLArray(cls):
'\n WRAPPER\n '
def prepare(arr):
return np.require(arr, None, 'C')
@classmethod
def from_array(cls, arr, *args, **kwargs):
queue = get_device().queue
return cl_array.to_device(queue, prepare(arr), *args, **kwargs)
@classmethod
def empty(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.empty(queue, shape, dtype)
@classmethod
def empty_like(cls, arr):
return cls.empty(arr.shape, arr.dtype)
@classmethod
def zeros(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.zeros(queue, shape, dtype)
@classmethod
def zeros_like(cls, arr):
queue = get_device().queue
return cl_array.zeros_like(queue, arr)
def copy_buffer(self, buf, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, buf.data, **kwargs)
def write_array(self, data, **kwargs):
queue = get_device().queue
return cl.enqueue_write_buffer(queue, self.data, prepare(data), **kwargs)
def copy_image(self, img, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, img, offset=0, origin=(0, 0), region=img.shape, **kwargs)
def copy_image_resampled(self, img, **kwargs):
if (self.dtype.type == np.float32):
type_str = 'float'
elif (self.dtype.type == np.complex64):
type_str = 'complex'
else:
raise NotImplementedError('only resampling of float32 and complex64 arrays possible ')
kern_str = ('img%dd_to_buf_%s' % (len(img.shape), type_str))
OCLArray._resample_prog.run_kernel(kern_str, self.shape[::(- 1)], None, img, self.data)
def wrap_module_func(mod, f):
def func(self, *args, **kwargs):
return getattr(mod, f)(self, *args, **kwargs)
return func
cls.from_array = from_array
cls.empty = empty
cls.empty_like = empty_like
cls.zeros = zeros
cls.zeros_like = zeros_like
cls.copy_buffer = copy_buffer
cls.copy_image = copy_image
cls.copy_image_resampled = copy_image_resampled
cls.write_array = write_array
cls._resample_prog = OCLProgram(abspath('kernels/copy_resampled.cl'))
for f in ['sum', 'max', 'min', 'dot', 'vdot']:
setattr(cls, f, wrap_module_func(cl_array, f))
for f in dir(cl_math):
if isinstance(getattr(cl_math, f), collections.Callable):
setattr(cls, f, wrap_module_func(cl_math, f))
cls.__name__ = str('OCLArray')
return cls | WRAPPER | gputools/core/ocltypes.py | _wrap_OCLArray | VolkerH/gputools | 0 | python | def _wrap_OCLArray(cls):
'\n \n '
def prepare(arr):
return np.require(arr, None, 'C')
@classmethod
def from_array(cls, arr, *args, **kwargs):
queue = get_device().queue
return cl_array.to_device(queue, prepare(arr), *args, **kwargs)
@classmethod
def empty(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.empty(queue, shape, dtype)
@classmethod
def empty_like(cls, arr):
return cls.empty(arr.shape, arr.dtype)
@classmethod
def zeros(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.zeros(queue, shape, dtype)
@classmethod
def zeros_like(cls, arr):
queue = get_device().queue
return cl_array.zeros_like(queue, arr)
def copy_buffer(self, buf, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, buf.data, **kwargs)
def write_array(self, data, **kwargs):
queue = get_device().queue
return cl.enqueue_write_buffer(queue, self.data, prepare(data), **kwargs)
def copy_image(self, img, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, img, offset=0, origin=(0, 0), region=img.shape, **kwargs)
def copy_image_resampled(self, img, **kwargs):
if (self.dtype.type == np.float32):
type_str = 'float'
elif (self.dtype.type == np.complex64):
type_str = 'complex'
else:
raise NotImplementedError('only resampling of float32 and complex64 arrays possible ')
kern_str = ('img%dd_to_buf_%s' % (len(img.shape), type_str))
OCLArray._resample_prog.run_kernel(kern_str, self.shape[::(- 1)], None, img, self.data)
def wrap_module_func(mod, f):
def func(self, *args, **kwargs):
return getattr(mod, f)(self, *args, **kwargs)
return func
cls.from_array = from_array
cls.empty = empty
cls.empty_like = empty_like
cls.zeros = zeros
cls.zeros_like = zeros_like
cls.copy_buffer = copy_buffer
cls.copy_image = copy_image
cls.copy_image_resampled = copy_image_resampled
cls.write_array = write_array
cls._resample_prog = OCLProgram(abspath('kernels/copy_resampled.cl'))
for f in ['sum', 'max', 'min', 'dot', 'vdot']:
setattr(cls, f, wrap_module_func(cl_array, f))
for f in dir(cl_math):
if isinstance(getattr(cl_math, f), collections.Callable):
setattr(cls, f, wrap_module_func(cl_math, f))
cls.__name__ = str('OCLArray')
return cls | def _wrap_OCLArray(cls):
'\n \n '
def prepare(arr):
return np.require(arr, None, 'C')
@classmethod
def from_array(cls, arr, *args, **kwargs):
queue = get_device().queue
return cl_array.to_device(queue, prepare(arr), *args, **kwargs)
@classmethod
def empty(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.empty(queue, shape, dtype)
@classmethod
def empty_like(cls, arr):
return cls.empty(arr.shape, arr.dtype)
@classmethod
def zeros(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.zeros(queue, shape, dtype)
@classmethod
def zeros_like(cls, arr):
queue = get_device().queue
return cl_array.zeros_like(queue, arr)
def copy_buffer(self, buf, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, buf.data, **kwargs)
def write_array(self, data, **kwargs):
queue = get_device().queue
return cl.enqueue_write_buffer(queue, self.data, prepare(data), **kwargs)
def copy_image(self, img, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, img, offset=0, origin=(0, 0), region=img.shape, **kwargs)
def copy_image_resampled(self, img, **kwargs):
if (self.dtype.type == np.float32):
type_str = 'float'
elif (self.dtype.type == np.complex64):
type_str = 'complex'
else:
raise NotImplementedError('only resampling of float32 and complex64 arrays possible ')
kern_str = ('img%dd_to_buf_%s' % (len(img.shape), type_str))
OCLArray._resample_prog.run_kernel(kern_str, self.shape[::(- 1)], None, img, self.data)
def wrap_module_func(mod, f):
def func(self, *args, **kwargs):
return getattr(mod, f)(self, *args, **kwargs)
return func
cls.from_array = from_array
cls.empty = empty
cls.empty_like = empty_like
cls.zeros = zeros
cls.zeros_like = zeros_like
cls.copy_buffer = copy_buffer
cls.copy_image = copy_image
cls.copy_image_resampled = copy_image_resampled
cls.write_array = write_array
cls._resample_prog = OCLProgram(abspath('kernels/copy_resampled.cl'))
for f in ['sum', 'max', 'min', 'dot', 'vdot']:
setattr(cls, f, wrap_module_func(cl_array, f))
for f in dir(cl_math):
if isinstance(getattr(cl_math, f), collections.Callable):
setattr(cls, f, wrap_module_func(cl_math, f))
cls.__name__ = str('OCLArray')
return cls<|docstring|>WRAPPER<|endoftext|> |
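A hedged sketch of the wrapped class in use, assuming the module applies _wrap_OCLArray to a pyopencl.array.Array subclass named OCLArray (as the cls.__name__ assignment and the OCLArray._resample_prog reference suggest):

    import numpy as np

    a = OCLArray.from_array(np.ones((64, 64), np.float32))   # host -> device
    b = OCLArray.empty_like(a)
    b.copy_buffer(a)               # device-to-device copy on the default queue
    print(a.sum().get())           # reductions come from pyopencl.array; prints 4096.0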
f4e609022adc7bb5f7608a0e5e18e0186b94bd5f6353c2dcad2be5c4e58e4a3d | def copy_buffer(self, buf):
'\n copy content of buf into im\n '
queue = get_device().queue
if hasattr(self, 'shape'):
imshape = self.shape
else:
imshape = (self.width,)
assert (imshape == buf.shape[::(- 1)])
ndim = len(imshape)
cl.enqueue_copy(queue, self, buf.data, offset=0, origin=((0,) * ndim), region=imshape) | copy content of buf into im | gputools/core/ocltypes.py | copy_buffer | VolkerH/gputools | 0 | python | def copy_buffer(self, buf):
'\n \n '
queue = get_device().queue
if hasattr(self, 'shape'):
imshape = self.shape
else:
imshape = (self.width,)
assert (imshape == buf.shape[::(- 1)])
ndim = len(imshape)
cl.enqueue_copy(queue, self, buf.data, offset=0, origin=((0,) * ndim), region=imshape) | def copy_buffer(self, buf):
'\n \n '
queue = get_device().queue
if hasattr(self, 'shape'):
imshape = self.shape
else:
imshape = (self.width,)
assert (imshape == buf.shape[::(- 1)])
ndim = len(imshape)
cl.enqueue_copy(queue, self, buf.data, offset=0, origin=((0,) * ndim), region=imshape)<|docstring|>copy content of buf into im<|endoftext|> |
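A hedged round-trip sketch; OCLImage.empty_like is hypothetical shorthand for creating a device image whose (x, y) dims are the reversed numpy shape, which is exactly the contract the assert above encodes:

    import numpy as np

    buf = OCLArray.from_array(np.zeros((32, 64), np.float32))   # numpy shape (y, x)
    img = OCLImage.empty_like(buf)   # hypothetical helper; image dims (64, 32)
    img.copy_buffer(buf)             # per the assert: img dims == buf.shape[::-1]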
dce64e88cd004287f07ead989b1bba546e53c940e517cd78faf174c6a74c4e79 | def _profile(user):
'Create a User Profile.'
profile = UserProfile()
profile.user_id = user.id
profile.save() | Create a User Profile. | shop/accounts/utils.py | _profile | Anych/mila-iris | 0 | python | def _profile(user):
profile = UserProfile()
profile.user_id = user.id
profile.save() | def _profile(user):
profile = UserProfile()
profile.user_id = user.id
profile.save()<|docstring|>Create a User Profile.<|endoftext|>
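A hedged wiring example; the post_save signal registration is an assumption about how callers might use the helper, not something taken from the record:

    from django.contrib.auth import get_user_model
    from django.db.models.signals import post_save
    from django.dispatch import receiver

    @receiver(post_save, sender=get_user_model())
    def create_profile(sender, instance, created, **kwargs):
        if created:
            _profile(instance)   # one profile per newly created user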
234f92ee4550ce5c4c7c07378902f3c7360b2156b7db574303d4b0a9aee26dd3 | def _redirect_to_next_page(request):
"\n Redirect users to 'next' page\n when they were redirect to login page.\n "
url = request.META.get('HTTP_REFERER')
query = requests.utils.urlparse(url).query
params = dict((x.split('=') for x in query.split('&')))
if ('next' in params):
nextPage = params['next']
return redirect(nextPage) | Redirect users to 'next' page
when they were redirected to login page. | shop/accounts/utils.py | _redirect_to_next_page | Anych/mila-iris | 0 | python | def _redirect_to_next_page(request):
"\n Redirect users to 'next' page\n when they were redirect to login page.\n "
url = request.META.get('HTTP_REFERER')
query = requests.utils.urlparse(url).query
params = dict((x.split('=') for x in query.split('&')))
if ('next' in params):
nextPage = params['next']
return redirect(nextPage) | def _redirect_to_next_page(request):
"\n Redirect users to 'next' page\n when they were redirect to login page.\n "
url = request.META.get('HTTP_REFERER')
query = requests.utils.urlparse(url).query
params = dict((x.split('=') for x in query.split('&')))
if ('next' in params):
nextPage = params['next']
return redirect(nextPage)<|docstring|>Redirect users to 'next' page
when they were redirected to login page.<|endoftext|>
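A hedged illustration of the parsing step the view relies on (the URL is an assumption); note that the dict(x.split('=') ...) idiom assumes every query parameter contains exactly one '=':

    import requests

    url = 'https://example.com/accounts/login/?next=/store/&lang=en'
    query = requests.utils.urlparse(url).query          # 'next=/store/&lang=en'
    params = dict(x.split('=') for x in query.split('&'))
    assert params['next'] == '/store/'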
31bd608d790561ac5d0b0684afb8d27867410a3cc3edd8ac7509bd3991497260 | def to_boolean(value, ctx):
'\n Tries conversion of any value to a boolean\n '
if isinstance(value, bool):
return value
elif isinstance(value, int):
return (value != 0)
elif isinstance(value, Decimal):
return (value != Decimal(0))
elif isinstance(value, six.string_types):
value = value.lower()
if (value == 'true'):
return True
elif (value == 'false'):
return False
elif (isinstance(value, datetime.date) or isinstance(value, datetime.time)):
return True
raise EvaluationError(("Can't convert '%s' to a boolean" % six.text_type(value))) | Tries conversion of any value to a boolean | python/temba_expressions/conversions.py | to_boolean | greatnonprofits-nfp/ccl-expressions | 0 | python | def to_boolean(value, ctx):
'\n \n '
if isinstance(value, bool):
return value
elif isinstance(value, int):
return (value != 0)
elif isinstance(value, Decimal):
return (value != Decimal(0))
elif isinstance(value, six.string_types):
value = value.lower()
if (value == 'true'):
return True
elif (value == 'false'):
return False
elif (isinstance(value, datetime.date) or isinstance(value, datetime.time)):
return True
raise EvaluationError(("Can't convert '%s' to a boolean" % six.text_type(value))) | def to_boolean(value, ctx):
'\n \n '
if isinstance(value, bool):
return value
elif isinstance(value, int):
return (value != 0)
elif isinstance(value, Decimal):
return (value != Decimal(0))
elif isinstance(value, six.string_types):
value = value.lower()
if (value == 'true'):
return True
elif (value == 'false'):
return False
elif (isinstance(value, datetime.date) or isinstance(value, datetime.time)):
return True
raise EvaluationError(("Can't convert '%s' to a boolean" % six.text_type(value)))<|docstring|>Tries conversion of any value to a boolean<|endoftext|> |
1e17a0755e17456d5b7160dac461beb229b265744ad37589d52fcfe48e6b232f | def to_integer(value, ctx):
'\n Tries conversion of any value to an integer\n '
if isinstance(value, bool):
return (1 if value else 0)
elif isinstance(value, int):
return value
elif isinstance(value, Decimal):
try:
val = int(value.to_integral_exact(ROUND_HALF_UP))
if isinstance(val, int):
return val
except ArithmeticError:
pass
elif isinstance(value, six.string_types):
try:
return int(value)
except ValueError:
pass
raise EvaluationError(("Can't convert '%s' to an integer" % six.text_type(value))) | Tries conversion of any value to an integer | python/temba_expressions/conversions.py | to_integer | greatnonprofits-nfp/ccl-expressions | 0 | python | def to_integer(value, ctx):
'\n \n '
if isinstance(value, bool):
return (1 if value else 0)
elif isinstance(value, int):
return value
elif isinstance(value, Decimal):
try:
val = int(value.to_integral_exact(ROUND_HALF_UP))
if isinstance(val, int):
return val
except ArithmeticError:
pass
elif isinstance(value, six.string_types):
try:
return int(value)
except ValueError:
pass
raise EvaluationError(("Can't convert '%s' to an integer" % six.text_type(value))) | def to_integer(value, ctx):
'\n \n '
if isinstance(value, bool):
return (1 if value else 0)
elif isinstance(value, int):
return value
elif isinstance(value, Decimal):
try:
val = int(value.to_integral_exact(ROUND_HALF_UP))
if isinstance(val, int):
return val
except ArithmeticError:
pass
elif isinstance(value, six.string_types):
try:
return int(value)
except ValueError:
pass
raise EvaluationError(("Can't convert '%s' to an integer" % six.text_type(value)))<|docstring|>Tries conversion of any value to an integer<|endoftext|> |
7f5aeef4b9da61ff9f823151601aeb049006a1ee42416767aabc5d096ae5f580 | def to_decimal(value, ctx):
'\n Tries conversion of any value to a decimal\n '
if isinstance(value, bool):
return (Decimal(1) if value else Decimal(0))
elif isinstance(value, int):
return Decimal(value)
elif isinstance(value, Decimal):
return value
elif isinstance(value, six.string_types):
try:
return Decimal(value)
except Exception:
pass
raise EvaluationError(("Can't convert '%s' to a decimal" % six.text_type(value))) | Tries conversion of any value to a decimal | python/temba_expressions/conversions.py | to_decimal | greatnonprofits-nfp/ccl-expressions | 0 | python | def to_decimal(value, ctx):
'\n \n '
if isinstance(value, bool):
return (Decimal(1) if value else Decimal(0))
elif isinstance(value, int):
return Decimal(value)
elif isinstance(value, Decimal):
return value
elif isinstance(value, six.string_types):
try:
return Decimal(value)
except Exception:
pass
raise EvaluationError(("Can't convert '%s' to a decimal" % six.text_type(value))) | def to_decimal(value, ctx):
'\n \n '
if isinstance(value, bool):
return (Decimal(1) if value else Decimal(0))
elif isinstance(value, int):
return Decimal(value)
elif isinstance(value, Decimal):
return value
elif isinstance(value, six.string_types):
try:
return Decimal(value)
except Exception:
pass
raise EvaluationError(("Can't convert '%s' to a decimal" % six.text_type(value)))<|docstring|>Tries conversion of any value to a decimal<|endoftext|> |