<SYSTEM_TASK:> Appends a row of value to the end of the data. Be very careful with this function as for sorted Series it will <END_TASK> <USER_TASK:> Description: def append_row(self, index, value): """ Appends a row of value to the end of the data. Be very careful with this function as for sorted Series it will not enforce sort order. Use this only for speed when needed, be careful. :param index: index :param value: value :return: nothing """
if index in self._index:
    raise IndexError('index already in Series')
self._index.append(index)
self._data.append(value)
<SYSTEM_TASK:> Appends values to the end of the data. Be very careful with this function as for sort DataFrames it will not <END_TASK> <USER_TASK:> Description: def append_rows(self, indexes, values): """ Appends values to the end of the data. Be very careful with this function as for sort DataFrames it will not enforce sort order. Use this only for speed when needed, be careful. :param indexes: list of indexes to append :param values: list of values to append :return: nothing """
# check that the values list is the same length as the indexes list
if len(values) != len(indexes):
    raise ValueError('length of values is not equal to length of indexes')

# check the indexes are not duplicates
combined_index = self._index + indexes
if len(set(combined_index)) != len(combined_index):
    raise IndexError('duplicate indexes in Series')

# append index value
self._index.extend(indexes)
self._data.extend(values)
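A minimal usage sketch of the two append methods above, assuming the raccoon package is importable as `rc` and that a Series accepts the usual data/index/sort keywords; the values are illustrative only:

import raccoon as rc

# build a small sorted Series (hypothetical values)
srs = rc.Series(data=[10, 20], index=[1, 2], sort=True)

# fast single-row append: the caller is responsible for keeping sort order
srs.append_row(3, 30)

# fast multi-row append: new indexes must not collide with existing ones
srs.append_rows([4, 5], [40, 50])

print(srs.get([3, 4, 5], as_list=True))  # expected: [30, 40, 50]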
<SYSTEM_TASK:> Delete rows from the DataFrame <END_TASK> <USER_TASK:> Description: def delete(self, indexes): """ Delete rows from the DataFrame :param indexes: either a list of values or list of booleans for the rows to delete :return: nothing """
indexes = [indexes] if not isinstance(indexes, (list, blist)) else indexes

if all([isinstance(i, bool) for i in indexes]):  # boolean list
    if len(indexes) != len(self._index):
        raise ValueError('boolean indexes list must be same size of existing indexes')
    indexes = [i for i, x in enumerate(indexes) if x]
else:
    indexes = [sorted_index(self._index, x) for x in indexes] if self._sort \
        else [self._index.index(x) for x in indexes]

indexes = sorted(indexes, reverse=True)  # need to sort and reverse list so deleting works
for i in indexes:
    del self._data[i]

# now remove from index
for i in indexes:
    del self._index[i]
<SYSTEM_TASK:> Resets the index of the Series to simple integer list and the index name to 'index'. <END_TASK> <USER_TASK:> Description: def reset_index(self): """ Resets the index of the Series to simple integer list and the index name to 'index'. :return: nothing """
self.index = list(range(self.__len__()))
self.index_name = 'index'
<SYSTEM_TASK:> Wrapper function for get. It will return a list, no index. If the indexes are integers it will be assumed <END_TASK> <USER_TASK:> Description: def value(self, indexes, int_as_index=False): """ Wrapper function for get. It will return a list, no index. If the indexes are integers it will be assumed that they are locations unless int_as_index = True. If the indexes are locations then they will be rotated to the left by offset number of locations. :param indexes: integer location, single index, list of indexes or list of boolean :param int_as_index: if True then will treat int index values as indexes and not locations :return: value or list of values """
# single integer value
if isinstance(indexes, int):
    if int_as_index:
        return self.get(indexes, as_list=True)
    else:
        indexes = indexes - self._offset
        return self._data[indexes]
# slice
elif isinstance(indexes, slice):
    if isinstance(indexes.start, int) and not int_as_index:
        # treat as location
        start = indexes.start - self._offset
        stop = indexes.stop - self._offset + 1  # to capture the last value
        # check locations are valid and will not return empty
        if start > stop:
            raise IndexError('end of slice is before start of slice')
        if (start > 0 > stop) or (start < 0 < stop):
            raise IndexError('slice indexes invalid with given offset:%f' % self._offset)
        # where end is the last element
        if (start < 0) and stop == 0:
            return self._data[start:]
        return self._data[start:stop]
    else:
        # treat as index
        indexes = self._slice_index(indexes)
        return self.get(indexes, as_list=True)
# list of booleans
elif all([isinstance(x, bool) for x in indexes]):
    return self.get(indexes, as_list=True)
# list of values
elif isinstance(indexes, list):
    if int_as_index or not isinstance(indexes[0], int):
        return self.get(indexes, as_list=True)
    else:
        indexes = [x - self._offset for x in indexes]
        return self.get_locations(indexes, as_list=True)
# just a single value
else:
    return self.get(indexes)
<SYSTEM_TASK:> Creates and return a Series from a DataFrame and specific column <END_TASK> <USER_TASK:> Description: def from_dataframe(cls, dataframe, column, offset=0): """ Creates and return a Series from a DataFrame and specific column :param dataframe: raccoon DataFrame :param column: column name :param offset: offset value must be provided as there is no equivalent for a DataFrame :return: Series """
return cls(data=dataframe.get_entire_column(column, as_list=True), index=dataframe.index, data_name=column,
           index_name=dataframe.index_name, sort=dataframe.sort, offset=offset)
<SYSTEM_TASK:> Creates and return a Series from a Series <END_TASK> <USER_TASK:> Description: def from_series(cls, series, offset=0): """ Creates and return a Series from a Series :param series: raccoon Series :param offset: offset value must be provided as there is no equivalent for a DataFrame :return: Series """
return cls(data=series.data, index=series.index, data_name=series.data_name,
           index_name=series.index_name, sort=series.sort, offset=offset)
<SYSTEM_TASK:> Return the downstream leaf stages of this stage. <END_TASK> <USER_TASK:> Description: def getLeaves(self): """Return the downstream leaf stages of this stage."""
result = list()
if not self._next_stages:
    result.append(self)
else:
    for stage in self._next_stages:
        leaves = stage.getLeaves()
        result += leaves
return result
<SYSTEM_TASK:> Create and start up the internal workers. <END_TASK> <USER_TASK:> Description: def build(self): """Create and start up the internal workers."""
# If there's no output tube, it means that this stage
# is at the end of a fork (hasn't been linked to any stage downstream).
# Therefore, create one output tube.
if not self._output_tubes:
    self._output_tubes.append(self._worker_class.getTubeClass()())

self._worker_class.assemble(
    self._worker_args,
    self._input_tube,
    self._output_tubes,
    self._size,
    self._disable_result,
    self._do_stop_task,
)

# Build all downstream stages.
for stage in self._next_stages:
    stage.build()
<SYSTEM_TASK:> Given a list of column names will sort the DataFrame columns to match the given order <END_TASK> <USER_TASK:> Description: def _sort_columns(self, columns_list): """ Given a list of column names will sort the DataFrame columns to match the given order :param columns_list: list of column names. Must include all column names :return: nothing """
if not (all([x in columns_list for x in self._columns]) and all([x in self._columns for x in columns_list])):
    raise ValueError(
        'columns_list must be all in current columns, and all current columns must be in columns_list')
new_sort = [self._columns.index(x) for x in columns_list]
self._data = blist([self._data[x] for x in new_sort]) if self._blist else [self._data[x] for x in new_sort]
self._columns = blist([self._columns[x] for x in new_sort]) if self._blist \
    else [self._columns[x] for x in new_sort]
<SYSTEM_TASK:> Pad the data in DataFrame with [None] to ensure that all columns have the same length. <END_TASK> <USER_TASK:> Description: def _pad_data(self, max_len=None): """ Pad the data in DataFrame with [None] to ensure that all columns have the same length. :param max_len: If provided will extend all columns to this length, if not then will use the longest column :return: nothing """
if not max_len:
    max_len = max([len(x) for x in self._data])
for _, col in enumerate(self._data):
    col.extend([None] * (max_len - len(col)))
<SYSTEM_TASK:> Given indexes and columns will return a sub-set of the DataFrame. This method will direct to the below methods <END_TASK> <USER_TASK:> Description: def get(self, indexes=None, columns=None, as_list=False, as_dict=False): """ Given indexes and columns will return a sub-set of the DataFrame. This method will direct to the below methods based on what types are passed in for the indexes and columns. The type of the return is determined by the types of the parameters. :param indexes: index value, list of index values, or a list of booleans. If None then all indexes are used :param columns: column name or list of column names. If None then all columns are used :param as_list: if True then return the values as a list, if False return a DataFrame. This is only used if the get is for a single column :param as_dict: if True then return the values as a dictionary, if False return a DataFrame. This is only used if the get is for a single row :return: either DataFrame, list, dict or single value. The return is a shallow copy """
if (indexes is None) and (columns is not None) and (not isinstance(columns, (list, blist))):
    return self.get_entire_column(columns, as_list)
if indexes is None:
    indexes = [True] * len(self._index)
if columns is None:
    columns = [True] * len(self._columns)

if isinstance(indexes, (list, blist)) and isinstance(columns, (list, blist)):
    return self.get_matrix(indexes, columns)
elif isinstance(indexes, (list, blist)) and (not isinstance(columns, (list, blist))):
    return self.get_rows(indexes, columns, as_list)
elif (not isinstance(indexes, (list, blist))) and isinstance(columns, (list, blist)):
    return self.get_columns(indexes, columns, as_dict)
else:
    return self.get_cell(indexes, columns)
<SYSTEM_TASK:> For a single index and column value return the value of the cell <END_TASK> <USER_TASK:> Description: def get_cell(self, index, column): """ For a single index and column value return the value of the cell :param index: index value :param column: column name :return: value """
i = sorted_index(self._index, index) if self._sort else self._index.index(index)
c = self._columns.index(column)
return self._data[c][i]
<SYSTEM_TASK:> For a list of indexes and a single column name return the values of the indexes in that column. <END_TASK> <USER_TASK:> Description: def get_rows(self, indexes, column, as_list=False): """ For a list of indexes and a single column name return the values of the indexes in that column. :param indexes: either a list of index values or a list of booleans with same length as all indexes :param column: single column name :param as_list: if True return a list, if False return DataFrame :return: DataFrame if as_list is False, a list if as_list is True """
c = self._columns.index(column)
if all([isinstance(i, bool) for i in indexes]):  # boolean list
    if len(indexes) != len(self._index):
        raise ValueError('boolean index list must be same size of existing index')
    if all(indexes):  # the entire column
        data = self._data[c]
        index = self._index
    else:
        data = list(compress(self._data[c], indexes))
        index = list(compress(self._index, indexes))
else:  # index values list
    locations = [sorted_index(self._index, x) for x in indexes] if self._sort \
        else [self._index.index(x) for x in indexes]
    data = [self._data[c][i] for i in locations]
    index = [self._index[i] for i in locations]
return data if as_list else DataFrame(data={column: data}, index=index, index_name=self._index_name,
                                      sort=self._sort)
<SYSTEM_TASK:> For a single index and list of column names return a DataFrame of the values in that index as either a dict <END_TASK> <USER_TASK:> Description: def get_columns(self, index, columns=None, as_dict=False): """ For a single index and list of column names return a DataFrame of the values in that index as either a dict or a DataFrame :param index: single index value :param columns: list of column names :param as_dict: if True then return the result as a dictionary :return: DataFrame or dictionary """
i = sorted_index(self._index, index) if self._sort else self._index.index(index)
return self.get_location(i, columns, as_dict)
<SYSTEM_TASK:> Shortcut method to retrieve a single column all rows. Since this is a common use case this method will be <END_TASK> <USER_TASK:> Description: def get_entire_column(self, column, as_list=False): """ Shortcut method to retrieve a single column all rows. Since this is a common use case this method will be faster than the more general method. :param column: single column name :param as_list: if True return a list, if False return DataFrame :return: DataFrame if as_list is False, a list if as_list is True """
c = self._columns.index(column)
data = self._data[c]
return data if as_list else DataFrame(data={column: data}, index=self._index, index_name=self._index_name,
                                      sort=self._sort)
<SYSTEM_TASK:> For a list of indexes and list of columns return a DataFrame of the values. <END_TASK> <USER_TASK:> Description: def get_matrix(self, indexes, columns): """ For a list of indexes and list of columns return a DataFrame of the values. :param indexes: either a list of index values or a list of booleans with same length as all indexes :param columns: list of column names :return: DataFrame """
if all([isinstance(i, bool) for i in indexes]):  # boolean list
    is_bool_indexes = True
    if len(indexes) != len(self._index):
        raise ValueError('boolean index list must be same size of existing index')
    bool_indexes = indexes
    indexes = list(compress(self._index, indexes))
else:
    is_bool_indexes = False
    locations = [sorted_index(self._index, x) for x in indexes] if self._sort \
        else [self._index.index(x) for x in indexes]

if all([isinstance(i, bool) for i in columns]):  # boolean list
    if len(columns) != len(self._columns):
        raise ValueError('boolean column list must be same size of existing columns')
    columns = list(compress(self._columns, columns))

col_locations = [self._columns.index(x) for x in columns]
data_dict = dict()

for c in col_locations:
    data_dict[self._columns[c]] = list(compress(self._data[c], bool_indexes)) if is_bool_indexes \
        else [self._data[c][i] for i in locations]

return DataFrame(data=data_dict, index=indexes, columns=columns, index_name=self._index_name,
                 sort=self._sort)
<SYSTEM_TASK:> For list of locations and list of columns return a DataFrame of the values. <END_TASK> <USER_TASK:> Description: def get_locations(self, locations, columns=None, **kwargs): """ For list of locations and list of columns return a DataFrame of the values. :param locations: list of index locations :param columns: list of column names :param kwargs: will pass along these parameters to the get() method :return: DataFrame """
indexes = [self._index[x] for x in locations]
return self.get(indexes, columns, **kwargs)
<SYSTEM_TASK:> Insert a new row in the DataFrame. <END_TASK> <USER_TASK:> Description: def _insert_row(self, i, index): """ Insert a new row in the DataFrame. :param i: index location to insert :param index: index value to insert into the index list :return: nothing """
if i == len(self._index):
    self._add_row(index)
else:
    self._index.insert(i, index)
    for c in range(len(self._columns)):
        self._data[c].insert(i, None)
<SYSTEM_TASK:> Add a new column to the DataFrame <END_TASK> <USER_TASK:> Description: def _add_column(self, column): """ Add a new column to the DataFrame :param column: column name :return: nothing """
self._columns.append(column)
if self._blist:
    self._data.append(blist([None] * len(self._index)))
else:
    self._data.append([None] * len(self._index))
<SYSTEM_TASK:> Given indexes and columns will set a sub-set of the DataFrame to the values provided. This method will direct <END_TASK> <USER_TASK:> Description: def set(self, indexes=None, columns=None, values=None): """ Given indexes and columns will set a sub-set of the DataFrame to the values provided. This method will direct to the below methods based on what types are passed in for the indexes and columns. If the indexes or columns contains values not in the DataFrame then new rows or columns will be added. :param indexes: indexes value, list of indexes values, or a list of booleans. If None then all indexes are used :param columns: columns name, if None then all columns are used. Currently can only handle a single column or\ all columns :param values: value or list of values to set (index, column) to. If setting just a single row, then must be a\ dict where the keys are the column names. If a list then must be the same length as the indexes parameter, if\ indexes=None, then must be the same and length of DataFrame :return: nothing """
if (indexes is not None) and (columns is not None):
    if isinstance(indexes, (list, blist)):
        self.set_column(indexes, columns, values)
    else:
        self.set_cell(indexes, columns, values)
elif (indexes is not None) and (columns is None):
    self.set_row(indexes, values)
elif (indexes is None) and (columns is not None):
    self.set_column(indexes, columns, values)
else:
    raise ValueError('either or both of indexes or columns must be provided')
<SYSTEM_TASK:> Sets the values of the columns in a single row. <END_TASK> <USER_TASK:> Description: def set_row(self, index, values): """ Sets the values of the columns in a single row. :param index: index value :param values: dict with the keys as the column names and the values what to set that column to :return: nothing """
if self._sort:
    exists, i = sorted_exists(self._index, index)
    if not exists:
        self._insert_row(i, index)
else:
    try:
        i = self._index.index(index)
    except ValueError:  # new row
        i = len(self._index)
        self._add_row(index)
if isinstance(values, dict):
    if not (set(values.keys()).issubset(self._columns)):
        raise ValueError('keys of values are not all in existing columns')
    for c, column in enumerate(self._columns):
        self._data[c][i] = values.get(column, self._data[c][i])
else:
    raise TypeError('cannot handle values of this type.')
<SYSTEM_TASK:> Set a column to a single value or list of values. If any of the index values are not in the current indexes <END_TASK> <USER_TASK:> Description: def set_column(self, index=None, column=None, values=None): """ Set a column to a single value or list of values. If any of the index values are not in the current indexes then a new row will be created. :param index: list of index values or list of booleans. If a list of booleans then the list must be the same\ length as the DataFrame :param column: column name :param values: either a single value or a list. The list must be the same length as the index list if the index\ list is values, or the length of the True values in the index list if the index list is booleans :return: nothing """
try:
    c = self._columns.index(column)
except ValueError:  # new column
    c = len(self._columns)
    self._add_column(column)
if index:  # index was provided
    if all([isinstance(i, bool) for i in index]):  # boolean list
        if not isinstance(values, (list, blist)):  # single value provided, not a list, so turn values into list
            values = [values for x in index if x]
        if len(index) != len(self._index):
            raise ValueError('boolean index list must be same size of existing index')
        if len(values) != index.count(True):
            raise ValueError('length of values list must equal number of True entries in index list')
        indexes = [i for i, x in enumerate(index) if x]
        for x, i in enumerate(indexes):
            self._data[c][i] = values[x]
    else:  # list of index
        if not isinstance(values, (list, blist)):  # single value provided, not a list, so turn values into list
            values = [values for _ in index]
        if len(values) != len(index):
            raise ValueError('length of values and index must be the same.')
        # insert or append indexes as needed
        if self._sort:
            exists_tuples = list(zip(*[sorted_exists(self._index, x) for x in index]))
            exists = exists_tuples[0]
            indexes = exists_tuples[1]
            if not all(exists):
                self._insert_missing_rows(index)
                indexes = [sorted_index(self._index, x) for x in index]
        else:
            try:  # all index in current index
                indexes = [self._index.index(x) for x in index]
            except ValueError:  # new rows need to be added
                self._add_missing_rows(index)
                indexes = [self._index.index(x) for x in index]
        for x, i in enumerate(indexes):
            self._data[c][i] = values[x]
else:  # no index, only values
    if not isinstance(values, (list, blist)):  # values not a list, turn into one of length same as index
        values = [values for _ in self._index]
    if len(values) != len(self._index):
        raise ValueError('values list must be at same length as current index length.')
    else:
        self._data[c] = blist(values) if self._blist else values
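A short sketch of the three index forms set_column accepts (boolean mask, index-value list, and no index). It assumes a raccoon DataFrame built with the usual data/index keywords; all values are illustrative:

import raccoon as rc

df = rc.DataFrame(data={'a': [1, 2, 3]}, index=[5, 6, 7])

# boolean mask: the values list must match the number of True entries
df.set_column(index=[True, False, True], column='a', values=[10, 30])

# index-value list: missing indexes (8 here) are appended as new rows
df.set_column(index=[7, 8], column='b', values=[70, 80])

# no index: replaces the whole column, so length must equal len(df) (now 4 rows)
df.set_column(column='a', values=[0, 0, 0, 0])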
<SYSTEM_TASK:> Appends a row of values to the end of the data. If there are new columns in the values and new_cols is True <END_TASK> <USER_TASK:> Description: def append_row(self, index, values, new_cols=True): """ Appends a row of values to the end of the data. If there are new columns in the values and new_cols is True they will be added. Be very careful with this function as for sort DataFrames it will not enforce sort order. Use this only for speed when needed, be careful. :param index: value of the index :param values: dictionary of values :param new_cols: if True add new columns in values, if False ignore :return: nothing """
if index in self._index:
    raise IndexError('index already in DataFrame')

if new_cols:
    for col in values:
        if col not in self._columns:
            self._add_column(col)

# append index value
self._index.append(index)

# add data values, if not in values then use None
for c, col in enumerate(self._columns):
    self._data[c].append(values.get(col, None))
<SYSTEM_TASK:> Appends rows of values to the end of the data. If there are new columns in the values and new_cols is True <END_TASK> <USER_TASK:> Description: def append_rows(self, indexes, values, new_cols=True): """ Appends rows of values to the end of the data. If there are new columns in the values and new_cols is True they will be added. Be very careful with this function as for sort DataFrames it will not enforce sort order. Use this only for speed when needed, be careful. :param indexes: list of indexes :param values: dictionary of values where the key is the column name and the value is a list :param new_cols: if True add new columns in values, if False ignore :return: nothing """
# check that the values data is less than or equal to the length of the indexes
for column in values:
    if len(values[column]) > len(indexes):
        raise ValueError('length of %s column in values is longer than indexes' % column)

# check the indexes are not duplicates
combined_index = self._index + indexes
if len(set(combined_index)) != len(combined_index):
    raise IndexError('duplicate indexes in DataFrames')

if new_cols:
    for col in values:
        if col not in self._columns:
            self._add_column(col)

# append index value
self._index.extend(indexes)

# add data values, if not in values then use None
for c, col in enumerate(self._columns):
    self._data[c].extend(values.get(col, [None] * len(indexes)))

self._pad_data()
<SYSTEM_TASK:> Returns a dict where the keys are the column names and the values are lists of the values for that column. <END_TASK> <USER_TASK:> Description: def to_dict(self, index=True, ordered=False): """ Returns a dict where the keys are the column names and the values are lists of the values for that column. :param index: If True then include the index in the dict with the index_name as the key :param ordered: If True then return an OrderedDict() to preserve the order of the columns in the DataFrame :return: dict or OrderedDict() """
result = OrderedDict() if ordered else dict()
if index:
    result.update({self._index_name: self._index})
if ordered:
    data_dict = [(column, self._data[i]) for i, column in enumerate(self._columns)]
else:
    data_dict = {column: self._data[i] for i, column in enumerate(self._columns)}
result.update(data_dict)
return result
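For illustration, a hedged sketch of to_dict on a small DataFrame (key order in the plain-dict case is not guaranteed):

df = rc.DataFrame(data={'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])

print(df.to_dict())
# e.g. {'index': ['x', 'y'], 'a': [1, 2], 'b': [3, 4]}

print(df.to_dict(index=False, ordered=True))
# e.g. OrderedDict([('a', [1, 2]), ('b', [3, 4])])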
<SYSTEM_TASK:> Renames the columns <END_TASK> <USER_TASK:> Description: def rename_columns(self, rename_dict): """ Renames the columns :param rename_dict: dict where the keys are the current column names and the values are the new names :return: nothing """
if not all([x in self._columns for x in rename_dict.keys()]):
    raise ValueError('all dictionary keys must be in current columns')
for current in rename_dict.keys():
    self._columns[self._columns.index(current)] = rename_dict[current]
<SYSTEM_TASK:> Delete columns from the DataFrame <END_TASK> <USER_TASK:> Description: def delete_columns(self, columns): """ Delete columns from the DataFrame :param columns: list of columns to delete :return: nothing """
columns = [columns] if not isinstance(columns, (list, blist)) else columns
if not all([x in self._columns for x in columns]):
    raise ValueError('all columns must be in current columns')

for column in columns:
    c = self._columns.index(column)
    del self._data[c]
    del self._columns[c]

if not len(self._data):  # if all the columns have been deleted, remove index
    self.index = list()
<SYSTEM_TASK:> Sort the DataFrame by the index. The sort modifies the DataFrame inplace <END_TASK> <USER_TASK:> Description: def sort_index(self): """ Sort the DataFrame by the index. The sort modifies the DataFrame inplace :return: nothing """
sort = sorted_list_indexes(self._index)
# sort index
self._index = blist([self._index[x] for x in sort]) if self._blist else [self._index[x] for x in sort]
# each column
for c in range(len(self._data)):
    self._data[c] = blist([self._data[c][i] for i in sort]) if self._blist \
        else [self._data[c][i] for i in sort]
<SYSTEM_TASK:> Validate the integrity of the DataFrame. This checks that the indexes, column names and internal data are not <END_TASK> <USER_TASK:> Description: def validate_integrity(self): """ Validate the integrity of the DataFrame. This checks that the indexes, column names and internal data are not corrupted. Will raise an error if there is a problem. :return: nothing """
self._validate_columns(self._columns)
self._validate_index(self._index)
self._validate_data()
<SYSTEM_TASK:> Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current <END_TASK> <USER_TASK:> Description: def append(self, data_frame): """ Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current DataFrame then new columns will be created. All of the indexes in the data_frame must be different from the current indexes or will raise an error. :param data_frame: DataFrame to append :return: nothing """
if len(data_frame) == 0:  # empty DataFrame, do nothing
    return

data_frame_index = data_frame.index

combined_index = self._index + data_frame_index
if len(set(combined_index)) != len(combined_index):
    raise ValueError('duplicate indexes in DataFrames')

for c, column in enumerate(data_frame.columns):
    if PYTHON3:
        self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c].copy())
    else:
        self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c][:])
<SYSTEM_TASK:> Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math <END_TASK> <USER_TASK:> Description: def add(self, left_column, right_column, indexes=None): """ Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math on that sub-set of the columns. :param left_column: first column name :param right_column: second column name :param indexes: list of index values or list of booleans. If a list of booleans then the list must be the same\ length as the DataFrame :return: list """
left_list, right_list = self._get_lists(left_column, right_column, indexes)
return [l + r for l, r in zip(left_list, right_list)]
<SYSTEM_TASK:> Returns a boolean list where each elements is whether that element in the column is in the compare_list. <END_TASK> <USER_TASK:> Description: def isin(self, column, compare_list): """ Returns a boolean list where each elements is whether that element in the column is in the compare_list. :param column: single column name, does not work for multiple columns :param compare_list: list of items to compare to :return: list of booleans """
return [x in compare_list for x in self._data[self._columns.index(column)]]
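A quick sketch of the two column helpers above (add and isin), with hypothetical data:

df = rc.DataFrame(data={'a': [1, 2, 3], 'b': [10, 20, 30]}, index=[1, 2, 3])

print(df.add('a', 'b'))       # [11, 22, 33]
print(df.isin('a', [2, 99]))  # [False, True, False]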
<SYSTEM_TASK:> Iterates over DataFrame rows as dictionary of the values. The index will be included. <END_TASK> <USER_TASK:> Description: def iterrows(self, index=True): """ Iterates over DataFrame rows as dictionary of the values. The index will be included. :param index: if True include the index in the results :return: dictionary """
for i in range(len(self._index)):
    row = {self._index_name: self._index[i]} if index else dict()
    for c, col in enumerate(self._columns):
        row[col] = self._data[c][i]
    yield row
<SYSTEM_TASK:> Iterates over DataFrame rows as tuple of the values. <END_TASK> <USER_TASK:> Description: def itertuples(self, index=True, name='Raccoon'): """ Iterates over DataFrame rows as tuple of the values. :param index: if True then include the index :param name: name of the namedtuple :return: namedtuple """
fields = [self._index_name] if index else list()
fields.extend(self._columns)
row_tuple = namedtuple(name, fields)
for i in range(len(self._index)):
    row = {self._index_name: self._index[i]} if index else dict()
    for c, col in enumerate(self._columns):
        row[col] = self._data[c][i]
    yield row_tuple(**row)
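Both iterators above yield one row at a time; a usage sketch with hypothetical values:

df = rc.DataFrame(data={'a': [1, 2]}, index=['x', 'y'])

for row in df.iterrows():
    print(row)               # e.g. {'index': 'x', 'a': 1}

for row in df.itertuples(name='Row'):
    print(row.index, row.a)  # namedtuple access by field name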
<SYSTEM_TASK:> For unit testing equality of two DataFrames. <END_TASK> <USER_TASK:> Description: def assert_frame_equal(left, right, data_function=None, data_args=None): """ For unit testing equality of two DataFrames. :param left: first DataFrame :param right: second DataFrame :param data_function: if provided will use this function to assert compare the df.data :param data_args: arguments to pass to the data_function :return: nothing """
if data_function:
    data_args = {} if not data_args else data_args
    data_function(left.data, right.data, **data_args)
else:
    assert left.data == right.data
assert left.index == right.index
assert left.columns == right.columns
assert left.index_name == right.index_name
assert left.sort == right.sort
assert left.blist == right.blist
<SYSTEM_TASK:> For unit testing equality of two Series. <END_TASK> <USER_TASK:> Description: def assert_series_equal(left, right, data_function=None, data_args=None): """ For unit testing equality of two Series. :param left: first Series :param right: second Series :param data_function: if provided will use this function to assert compare the df.data :param data_args: arguments to pass to the data_function :return: nothing """
assert type(left) == type(right)
if data_function:
    data_args = {} if not data_args else data_args
    data_function(left.data, right.data, **data_args)
else:
    assert left.data == right.data
assert left.index == right.index
assert left.data_name == right.data_name
assert left.index_name == right.index_name
assert left.sort == right.sort
if isinstance(left, rc.ViewSeries):
    assert left.offset == right.offset
if isinstance(left, rc.Series):
    assert left.blist == right.blist
<SYSTEM_TASK:> Return result from the pipeline. <END_TASK> <USER_TASK:> Description: def get(self, timeout=None): """Return result from the pipeline."""
result = None
for stage in self._output_stages:
    result = stage.get(timeout)
return result
<SYSTEM_TASK:> Load the list of songs. <END_TASK> <USER_TASK:> Description: def load(self, verbose=False): """ Load the list of songs. Note that this only loads a list of songs that this artist was the main artist of. If they were only featured in the song, that song won't be listed here. There is a list on the artist page for that, I just haven't added any parsing code for that, since I don't need it. """
self._songs = []
page_num = 1
total_pages = 1

while page_num <= total_pages:
    if verbose:
        print('retrieving page %d' % page_num)
    page = requests.get(ARTIST_URL.format(artist=self.name, n=page_num))
    tree = html.fromstring(page.text)
    song_rows_xp = r'//*[@id="popular"]/div/table/tbody/tr'
    songlist_pagination_xp = r'//*[@id="main-content"]/div[1]/'\
        'div[2]/p/span/a'

    rows = tree.xpath(song_rows_xp)
    for row in rows:
        song_link = row.xpath(r'./td/a[contains(@class,"title")]')
        assert len(song_link) == 1
        self._songs.append(Song(url=song_link[0].attrib['href']))

    total_pages = len(tree.xpath(songlist_pagination_xp))
    page_num += 1

return self
<SYSTEM_TASK:> Return the distance between two points on the surface of the Earth. <END_TASK> <USER_TASK:> Description: def distance(p0, p1, deg=True, r=r_earth_mean): """ Return the distance between two points on the surface of the Earth. Parameters ---------- p0 : point-like (or array of point-like) [longitude, latitude] objects p1 : point-like (or array of point-like) [longitude, latitude] objects deg : bool, optional (default True) indicates if p0 and p1 are specified in degrees r : float, optional (default r_earth_mean) radius of the sphere Returns ------- d : float Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Distance Note: Spherical earth model. By default uses radius of 6371.0 km. """
single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))

if deg:
    p0 = np.radians(p0)
    p1 = np.radians(p1)

lon0, lat0 = p0[:, 0], p0[:, 1]
lon1, lat1 = p1[:, 0], p1[:, 1]

# h_x used to denote haversine(x): sin^2(x / 2)
h_dlat = sin((lat1 - lat0) / 2.0) ** 2
h_dlon = sin((lon1 - lon0) / 2.0) ** 2
h_angle = h_dlat + cos(lat0) * cos(lat1) * h_dlon
angle = 2.0 * arcsin(sqrt(h_angle))
d = r * angle

if single:
    d = d[0]

return d
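A standalone haversine sketch of the same formula, useful as a cross-check of distance(); points are [lon, lat] in degrees and the radius is the 6371.0 km mean value noted above:

import numpy as np

def haversine_km(p0, p1, r=6371.0):
    # p0, p1 are [lon, lat] pairs in degrees
    lon0, lat0, lon1, lat1 = map(np.radians, [p0[0], p0[1], p1[0], p1[1]])
    h = np.sin((lat1 - lat0) / 2) ** 2 + np.cos(lat0) * np.cos(lat1) * np.sin((lon1 - lon0) / 2) ** 2
    return r * 2 * np.arcsin(np.sqrt(h))

# London to Paris, roughly 343-344 km on a spherical Earth
print(haversine_km([-0.1278, 51.5074], [2.3522, 48.8566]))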
<SYSTEM_TASK:> Compute the initial bearing along the great circle from p0 to p1 <END_TASK> <USER_TASK:> Description: def course(p0, p1, deg=True, bearing=False): """ Compute the initial bearing along the great circle from p0 to p1 NB: The angle returned by course() is not the traditional definition of bearing. It is definted such that 0 degrees to due East increasing counter-clockwise such that 90 degrees is due North. To obtain the bearing (0 degrees is due North increasing clockwise so that 90 degrees is due East), set the bearing flag input to True. Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects p1 : point-like (or array of point-like) [lon, lat] objects deg : bool, optional (default True) indicates if p0 and p1 are specified in degrees. The returned angle is returned in the same units as the input. bearing : bool, optional (default False) If True, use the classical definition of bearing where 0 degrees is due North increasing clockwise so that and 90 degrees is due East. Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Bearing """
single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))
if deg:
    p0 = np.radians(p0)
    p1 = np.radians(p1)

lon0, lat0 = p0[:, 0], p0[:, 1]
lon1, lat1 = p1[:, 0], p1[:, 1]

dlon = lon1 - lon0
a = sin(dlon) * cos(lat1)
b = cos(lat0) * sin(lat1) - sin(lat0) * cos(lat1) * cos(dlon)

if bearing:
    angle = arctan2(a, b)
else:
    angle = arctan2(b, a)

if deg:
    angle = np.degrees(angle)

if single:
    angle = angle[0]

return angle
<SYSTEM_TASK:> Given an initial point and angle, move distance d along the surface <END_TASK> <USER_TASK:> Description: def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean): """ Given an initial point and angle, move distance d along the surface Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km. """
single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1))
if deg:
    p0 = np.radians(p0)
    angle = np.radians(angle)

if not bearing:
    angle = np.pi / 2.0 - angle

lon0, lat0 = p0[:, 0], p0[:, 1]

angd = d / r
lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle))
a = sin(angle) * sin(angd) * cos(lat0)
b = cos(angd) - sin(lat0) * sin(lat1)
lon1 = lon0 + arctan2(a, b)

p1 = np.column_stack([lon1, lat1])

if deg:
    p1 = np.degrees(p1)

if single:
    p1 = p1[0]

return p1
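The three functions above compose naturally: the distance and course between two points, fed back into propagate() from the first point, should land (numerically) on the second. A hedged round-trip check, assuming distance, course and propagate are importable from the same module:

p0 = [-0.1278, 51.5074]    # London [lon, lat]
p1 = [2.3522, 48.8566]     # Paris  [lon, lat]

d = distance(p0, p1)                 # km, spherical Earth
brg = course(p0, p1, bearing=True)   # classical bearing in degrees
p1_again = propagate(p0, brg, d, bearing=True)

print(p1_again)  # should be very close to p1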
<SYSTEM_TASK:> Validate request signature. <END_TASK> <USER_TASK:> Description: def validate(self, signature, timestamp, nonce): """Validate request signature. :param signature: A string signature parameter sent by weixin. :param timestamp: A int timestamp parameter sent by weixin. :param nonce: A int nonce parameter sent by weixin. """
if not self.token:
    raise RuntimeError('WEIXIN_TOKEN is missing')

if self.expires_in:
    try:
        timestamp = int(timestamp)
    except (ValueError, TypeError):
        # fake timestamp
        return False

    delta = time.time() - timestamp
    if delta < 0:
        # this is a fake timestamp
        return False

    if delta > self.expires_in:
        # expired timestamp
        return False

values = [self.token, str(timestamp), str(nonce)]
s = ''.join(sorted(values))
hsh = hashlib.sha1(s.encode('utf-8')).hexdigest()
return signature == hsh
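validate() reproduces the WeChat signing rule: sort the token, timestamp and nonce as strings, concatenate, and SHA-1. A standalone sketch of that computation (token value hypothetical):

import hashlib
import time

def make_signature(token, timestamp, nonce):
    s = ''.join(sorted([token, str(timestamp), str(nonce)]))
    return hashlib.sha1(s.encode('utf-8')).hexdigest()

sig = make_signature('my-weixin-token', int(time.time()), 42)
# a request carrying this signature/timestamp/nonce triple would pass validate()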
<SYSTEM_TASK:> Parse xml body sent by weixin. <END_TASK> <USER_TASK:> Description: def parse(self, content): """Parse xml body sent by weixin. :param content: A text of xml body. """
raw = {}

try:
    root = etree.fromstring(content)
except SyntaxError as e:
    raise ValueError(*e.args)

for child in root:
    raw[child.tag] = child.text

formatted = self.format(raw)

msg_type = formatted['type']
msg_parser = getattr(self, 'parse_%s' % msg_type, None)
if callable(msg_parser):
    parsed = msg_parser(raw)
else:
    parsed = self.parse_invalid_type(raw)

formatted.update(parsed)
return formatted
<SYSTEM_TASK:> Create the reply text for weixin. <END_TASK> <USER_TASK:> Description: def reply(self, username, type='text', sender=None, **kwargs): """Create the reply text for weixin. The reply varies per reply type. The acceptable types are `text`, `music`, `news`, `image`, `voice`, `video`. Each type accepts different parameters, but they share some common parameters: * username: the receiver's username * type: the reply type, aka text, music and news * sender: sender is optional if you have a default value Text reply requires an additional parameter of `content`. Music reply requires 4 more parameters: * title: A string for music title * description: A string for music description * music_url: A link of the music * hq_music_url: A link of the high quality music News reply requires an additional parameter of `articles`, which is a list/tuple of articles, each one is a dict: * title: A string for article title * description: A string for article description * picurl: A link for article cover image * url: A link for article url Image and Voice reply requires an additional parameter of `media_id`. Video reply requires 3 more parameters: * media_id: A string for video `media_id` * title: A string for video title * description: A string for video description """
sender = sender or self.sender
if not sender:
    raise RuntimeError('WEIXIN_SENDER or sender argument is missing')

if type == 'text':
    content = kwargs.get('content', '')
    return text_reply(username, sender, content)

if type == 'music':
    values = {}
    for k in ('title', 'description', 'music_url', 'hq_music_url'):
        values[k] = kwargs.get(k)
    return music_reply(username, sender, **values)

if type == 'news':
    items = kwargs.get('articles', [])
    return news_reply(username, sender, *items)

if type == 'customer_service':
    service_account = kwargs.get('service_account', None)
    return transfer_customer_service_reply(username, sender, service_account)

if type == 'image':
    media_id = kwargs.get('media_id', '')
    return image_reply(username, sender, media_id)

if type == 'voice':
    media_id = kwargs.get('media_id', '')
    return voice_reply(username, sender, media_id)

if type == 'video':
    values = {}
    for k in ('media_id', 'title', 'description'):
        values[k] = kwargs.get(k)
    return video_reply(username, sender, **values)
<SYSTEM_TASK:> Register a command helper function. <END_TASK> <USER_TASK:> Description: def register(self, key=None, func=None, **kwargs): """Register a command helper function. You can register the function:: def print_help(**kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) weixin.register('help', print_help) It is also accessible as a decorator:: @weixin.register('help') def print_help(*args, **kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) """
if func:
    if key is None:
        limitation = frozenset(kwargs.items())
        self._registry_without_key.append((func, limitation))
    else:
        self._registry[key] = func
    return func
return self.__call__(key, **kwargs)
<SYSTEM_TASK:> Default view function for Flask app. <END_TASK> <USER_TASK:> Description: def view_func(self): """Default view function for Flask app. This is a simple implementation for view func, you can add it to your Flask app:: weixin = Weixin(app) app.add_url_rule('/', view_func=weixin.view_func) """
if request is None:
    raise RuntimeError('view_func need Flask be installed')

signature = request.args.get('signature')
timestamp = request.args.get('timestamp')
nonce = request.args.get('nonce')
if not self.validate(signature, timestamp, nonce):
    return 'signature failed', 400

if request.method == 'GET':
    echostr = request.args.get('echostr', '')
    return echostr

try:
    ret = self.parse(request.data)
except ValueError:
    return 'invalid', 400

if 'type' not in ret:
    # not a valid message
    return 'invalid', 400

if ret['type'] == 'text' and ret['content'] in self._registry:
    func = self._registry[ret['content']]
else:
    ret_set = frozenset(ret.items())
    matched_rules = (
        _func for _func, _limitation in self._registry_without_key
        if _limitation.issubset(ret_set))
    func = next(matched_rules, None)  # first matched rule

if func is None:
    if '*' in self._registry:
        func = self._registry['*']
    else:
        func = 'failed'

if callable(func):
    text = func(**ret)
else:
    # plain text
    text = self.reply(
        username=ret['sender'],
        sender=ret['receiver'],
        content=func,
    )
return Response(text, content_type='text/xml; charset=utf-8')
<SYSTEM_TASK:> Reads the remote file from Gist and saves it locally <END_TASK> <USER_TASK:> Description: def get(self): """Reads the remote file from Gist and saves it locally"""
if self.gist:
    content = self.github.read_gist_file(self.gist)
    self.local.save(content)
<SYSTEM_TASK:> Decorator to restrict some GitHubTools methods to run only with OAuth <END_TASK> <USER_TASK:> Description: def oauth_only(function): """Decorator to restrict some GitHubTools methods to run only with OAuth"""
def check_for_oauth(self, *args, **kwargs):
    """
    Returns False if GitHubTools instance is not authenticated, or return
    the decorated function if it is.
    """
    if not self.is_authenticated:
        self.oops("To use putgist you have to set your GETGIST_TOKEN")
        self.oops("(see `putgist --help` for details)")
        return False
    return function(self, *args, **kwargs)

return check_for_oauth
<SYSTEM_TASK:> Encapsulate requests.get to use this class instance header <END_TASK> <USER_TASK:> Description: def get(self, url, params=None, **kwargs): """Encapsulate requests.get to use this class instance header"""
return requests.get(url, params=params, headers=self.add_headers(**kwargs))
<SYSTEM_TASK:> Encapsulate requests.patch to use this class instance header <END_TASK> <USER_TASK:> Description: def patch(self, url, data=None, **kwargs): """Encapsulate requests.patch to use this class instance header"""
return requests.patch(url, data=data, headers=self.add_headers(**kwargs))
<SYSTEM_TASK:> Encapsulate requests.post to use this class instance header <END_TASK> <USER_TASK:> Description: def post(self, url, data=None, **kwargs): """Encapsulate requests.post to use this class instance header"""
return requests.post(url, data=data, headers=self.add_headers(**kwargs))
<SYSTEM_TASK:> Backups files with the same name of the instance filename <END_TASK> <USER_TASK:> Description: def backup(self): """Backups files with the same name of the instance filename"""
count = 0
name = "{}.bkp".format(self.filename)
backup = os.path.join(self.cwd, name)

while os.path.exists(backup):
    count += 1
    name = "{}.bkp{}".format(self.filename, count)
    backup = os.path.join(self.cwd, name)

self.hey("Moving existing {} to {}".format(self.filename, name))
os.rename(os.path.join(self.cwd, self.filename), backup)
<SYSTEM_TASK:> a faster way for characters to generate token strings cache <END_TASK> <USER_TASK:> Description: def char_matcher(mode): """ a faster way for characters to generate token strings cache """
def f_raw(inp_str, pos):
    return mode if inp_str[pos] is mode else None

def f_collection(inp_str, pos):
    ch = inp_str[pos]
    for each in mode:
        if ch is each:
            return ch
    return None

if isinstance(mode, str):
    return f_raw

if len(mode) is 1:
    mode = mode[0]
    return f_raw

return f_collection
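A small usage sketch of char_matcher: it returns a matcher closure that reports the matched character (or None) at a given position. Note the original code compares characters with identity (`is`), which works for single-character strings only because CPython interns them; the inputs below are illustrative:

match_plus = char_matcher('+')
print(match_plus('a+b', 1))  # '+'
print(match_plus('a+b', 0))  # None

match_bracket = char_matcher(['(', ')'])
print(match_bracket('f(x)', 1))  # '('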
<SYSTEM_TASK:> Static method defined to update paystack customer data by id. <END_TASK> <USER_TASK:> Description: def update(cls, customer_id, **kwargs): """ Static method defined to update paystack customer data by id. Args: customer_id: paystack customer id. first_name: customer's first name(optional). last_name: customer's last name(optional). email: customer's email address(optional). phone:customer's phone number(optional). Returns: Json data from paystack API. """
return cls().requests.put('customer/{customer_id}'.format(**locals()), data=kwargs)
<SYSTEM_TASK:> Accepts Slack formatted text and returns HTML. <END_TASK> <USER_TASK:> Description: def render(txt): """ Accepts Slack formatted text and returns HTML. """
# Removing links to other channels
txt = re.sub(r'<#[^\|]*\|(.*)>', r'#\g<1>', txt)

# Removing links to other users
txt = re.sub(r'<(@.*)>', r'\g<1>', txt)

# handle named hyperlinks
txt = re.sub(r'<([^\|]*)\|([^\|]*)>', r'<a href="\g<1>" target="blank">\g<2></a>', txt)

# handle unnamed hyperlinks
txt = re.sub(r'<([^a|/a].*)>', r'<a href="\g<1>" target="blank">\g<1></a>', txt)

# handle ordered and unordered lists
for delimeter in LIST_DELIMITERS:
    slack_tag = delimeter
    class_name = LIST_DELIMITERS[delimeter]

    # Wrap any lines that start with the slack_tag in <li></li>
    list_regex = u'(?:^|\n){}\s?(.*)'.format(slack_tag)
    list_repl = r'<li class="list-item-{}">\g<1></li>'.format(class_name)
    txt = re.sub(list_regex, list_repl, txt)

# handle blockquotes
txt = re.sub(u'(^|\n)(?:&gt;){3}\s?(.*)$', r'\g<1><blockquote>\g<2></blockquote>', txt, flags=re.DOTALL)
txt = re.sub(u'(?:^|\n)&gt;\s?(.*)\n?', r'<blockquote>\g<1></blockquote>', txt)

# handle code blocks
txt = re.sub(r'```\n?(.*)```', r'<pre>\g<1></pre>', txt, flags=re.DOTALL)
txt = re.sub(r'\n(</pre>)', r'\g<1>', txt)

# handle bolding, italics, and strikethrough
for wrapper in FORMATTERS:
    slack_tag = wrapper
    html_tag = FORMATTERS[wrapper]

    # Grab all text in formatted characters on the same line unless escaped
    regex = r'(?<!\\)\{t}([^\{t}|\n]*)\{t}'.format(t=slack_tag)
    repl = r'<{t}>\g<1></{t}>'.format(t=html_tag)
    txt = re.sub(regex, repl, txt)

# convert line breaks
txt = txt.replace('\n', '<br />')

# clean up bad HTML
parser = CustomSlackdownHTMLParser(txt)
txt = parser.clean()

# convert multiple spaces
txt = txt.replace(r' ', ' &nbsp')

return txt
<SYSTEM_TASK:> Add an open list tag corresponding to the specification in the <END_TASK> <USER_TASK:> Description: def _open_list(self, list_type): """ Add an open list tag corresponding to the specification in the parser's LIST_TYPES. """
if list_type in LIST_TYPES.keys():
    tag = LIST_TYPES[list_type]
else:
    raise Exception('CustomSlackdownHTMLParser:_open_list: Not a valid list type.')

html = '<{t} class="list-container-{c}">'.format(
    t=tag,
    c=list_type
)

self.cleaned_html += html
self.current_parent_element['tag'] = LIST_TYPES[list_type]
self.current_parent_element['attrs'] = {'class': list_type}
<SYSTEM_TASK:> Add a close list tag corresponding to the currently open <END_TASK> <USER_TASK:> Description: def _close_list(self): """ Add a close list tag corresponding to the currently open list found in current_parent_element. """
list_type = self.current_parent_element['attrs']['class']
tag = LIST_TYPES[list_type]
html = '</{t}>'.format(
    t=tag
)

self.cleaned_html += html
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = {}
<SYSTEM_TASK:> Called by HTMLParser.feed when a start tag is found. <END_TASK> <USER_TASK:> Description: def handle_starttag(self, tag, attrs): """ Called by HTMLParser.feed when a start tag is found. """
# Parse the tag attributes
attrs_dict = dict(t for t in attrs)

# If the tag is a predefined parent element
if tag in PARENT_ELEMENTS:
    # If parser is parsing another parent element
    if self.current_parent_element['tag'] != '':
        # close the parent element
        self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])

    self.current_parent_element['tag'] = tag
    self.current_parent_element['attrs'] = {}

    self.cleaned_html += '<{}>'.format(tag)

# If the tag is a list item
elif tag == 'li':
    self.parsing_li = True

    # Parse the class name & subsequent type
    class_name = attrs_dict['class']
    list_type = class_name[10:]

    # Check if parsing a list
    if self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol':
        cur_list_type = self.current_parent_element['attrs']['class']

        # Parsing a different list
        if cur_list_type != list_type:
            # Close that list
            self._close_list()
            # Open new list
            self._open_list(list_type)
    # Not parsing a list
    else:
        # if parsing some other parent
        if self.current_parent_element['tag'] != '':
            self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])

        # Open new list
        self._open_list(list_type)

    self.cleaned_html += '<{}>'.format(tag)

# If the tag is a line break
elif tag == 'br':
    # If parsing a paragraph, close it
    if self.current_parent_element['tag'] == 'p':
        self.cleaned_html += '</p>'
        self.current_parent_element['tag'] = ''
        self.current_parent_element['attrs'] = {}
    # If parsing a list, close it
    elif self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol':
        self._close_list()
    # If parsing any other parent element, keep it
    elif self.current_parent_element['tag'] in PARENT_ELEMENTS:
        self.cleaned_html += '<br />'
    # If not in any parent element, create an empty paragraph
    else:
        self.cleaned_html += '<p></p>'

# If the tag is something else, like a <b> or <i> tag
else:
    # If not parsing any parent element
    if self.current_parent_element['tag'] == '':
        self.cleaned_html += '<p>'
        self.current_parent_element['tag'] = 'p'

    self.cleaned_html += '<{}'.format(tag)
    for attr in sorted(attrs_dict.keys()):
        self.cleaned_html += ' {k}="{v}"'.format(
            k=attr,
            v=attrs_dict[attr]
        )
    self.cleaned_html += '>'
<SYSTEM_TASK:> Called by HTMLParser.feed when an end tag is found. <END_TASK> <USER_TASK:> Description: def handle_endtag(self, tag): """ Called by HTMLParser.feed when an end tag is found. """
if tag in PARENT_ELEMENTS:
    self.current_parent_element['tag'] = ''
    self.current_parent_element['attrs'] = ''

if tag == 'li':
    self.parsing_li = True

if tag != 'br':
    self.cleaned_html += '</{}>'.format(tag)
<SYSTEM_TASK:> Called by HTMLParser.feed when text is found. <END_TASK> <USER_TASK:> Description: def handle_data(self, data): """ Called by HTMLParser.feed when text is found. """
if self.current_parent_element['tag'] == '':
    self.cleaned_html += '<p>'
    self.current_parent_element['tag'] = 'p'

self.cleaned_html += data
<SYSTEM_TASK:> Removes formatting tags added to pre elements. <END_TASK> <USER_TASK:> Description: def _remove_pre_formatting(self): """ Removes formatting tags added to pre elements. """
preformatted_wrappers = [
    'pre',
    'code'
]

for wrapper in preformatted_wrappers:
    for formatter in FORMATTERS:
        tag = FORMATTERS[formatter]
        character = formatter

        regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format(
            t=tag,
            w=wrapper
        )
        repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character)
        self.cleaned_html = re.sub(regex, repl, self.cleaned_html)
<SYSTEM_TASK:> Goes through the txt input and cleans up any problematic HTML. <END_TASK> <USER_TASK:> Description: def clean(self): """ Goes through the txt input and cleans up any problematic HTML. """
# Calls handle_starttag, handle_endtag, and handle_data
self.feed()

# Clean up any parent tags left open
if self.current_parent_element['tag'] != '':
    self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])

# Remove empty <p> added after lists
self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\g<1>', self.cleaned_html)

self._remove_pre_formatting()

return self.cleaned_html
<SYSTEM_TASK:> Spectra to compare with models <END_TASK> <USER_TASK:> Description: def get_normed_spectra(): """ Spectra to compare with models """
wl = np.load("%s/wl.npz" % LAB_DIR)['arr_0']
filenames = np.array(
    [SPEC_DIR + "/Spectra" + "/" + val for val in lamost_id])
grid, fluxes, ivars, npix, SNRs = lamost.load_spectra(
    lamost_id, input_grid=wl)
ds = dataset.Dataset(
    wl, lamost_id, fluxes, ivars, [1],
    lamost_id[0:2], fluxes[0:2], ivars[0:2])
ds.continuum_normalize_gaussian_smoothing(L=50)
np.savez(SPEC_DIR + "/" + "norm_flux.npz", ds.tr_flux)
np.savez(SPEC_DIR + "/" + "norm_ivar.npz", ds.tr_ivar)
return ds.tr_flux, ds.tr_ivar
<SYSTEM_TASK:> Normalize according to The Cannon <END_TASK> <USER_TASK:> Description: def cannon_normalize(spec_raw): """ Normalize according to The Cannon """
spec = np.array([spec_raw])
wl = np.arange(0, spec.shape[1])
w = continuum_normalization.gaussian_weight_matrix(wl, L=50)
ivar = np.ones(spec.shape) * 0.5
cont = continuum_normalization._find_cont_gaussian_smooth(
    wl, spec, ivar, w)
norm_flux, norm_ivar = continuum_normalization._cont_norm(
    spec, ivar, cont)
return norm_flux[0]
<SYSTEM_TASK:> Resample spectrum onto desired grid <END_TASK> <USER_TASK:> Description: def resample(grid, wl, flux): """ Resample spectrum onto desired grid """
flux_rs = (interpolate.interp1d(wl, flux))(grid)
return flux_rs
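A short sketch of resample(): it simply evaluates a linear interpolant of (wl, flux) on the new grid, so the target grid must lie inside the original wavelength range. The grids below are illustrative:

import numpy as np
from scipy import interpolate

wl = np.linspace(4000, 7000, 3001)     # original wavelength grid (angstroms, illustrative)
flux = np.ones_like(wl)                # flat spectrum for the example
grid = np.linspace(4100, 6900, 2000)   # target grid stays within [4000, 7000]

flux_rs = interpolate.interp1d(wl, flux)(grid)
print(flux_rs.shape)  # (2000,)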
<SYSTEM_TASK:> Using the dataset and model object, calculate the residuals and return <END_TASK> <USER_TASK:> Description: def get_residuals(ds, m): """ Using the dataset and model object, calculate the residuals and return Parameters ---------- ds: dataset object m: model object Return ------ residuals: array of residuals, spec minus model spec """
model_spectra = get_model_spectra(ds, m)
resid = ds.test_flux - model_spectra
return resid
<SYSTEM_TASK:> Load the model <END_TASK> <USER_TASK:> Description: def load_model(): """ Load the model Parameters ---------- direc: directory with all of the model files Returns ------- m: model object """
direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
m = model.CannonModel(2)
m.coeffs = np.load(direc + "/coeffs.npz")['arr_0'][0:3626, :]  # no cols
m.scatters = np.load(direc + "/scatters.npz")['arr_0'][0:3626]  # no cols
m.chisqs = np.load(direc + "/chisqs.npz")['arr_0'][0:3626]  # no cols
m.pivots = np.load(direc + "/pivots.npz")['arr_0']
return m
<SYSTEM_TASK:> Fit a Gaussian to the data <END_TASK> <USER_TASK:> Description: def fit_gaussian(x, y, yerr, p0): """ Fit a Gaussian to the data """
try:
    popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=p0, absolute_sigma=True)
except RuntimeError:
    return [0], [0]
return popt, pcov
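fit_gaussian relies on a module-level gaussian(x, ...) model that is not shown here. A self-contained sketch of the same fit, with an assumed three-parameter Gaussian (amplitude, center, width) and synthetic data:

import numpy as np
from scipy.optimize import curve_fit

def gaussian(x, amp, mu, sigma):
    # assumed model form; the module's own gaussian() may differ
    return amp * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

x = np.linspace(-5, 5, 200)
y = gaussian(x, -0.3, 0.0, 1.2) + np.random.normal(0, 0.01, x.size)  # absorption-like feature
yerr = np.full_like(x, 0.01)

popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=[-0.2, 0.0, 1.0], absolute_sigma=True)
print(popt)  # fitted amplitude, center, width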
<SYSTEM_TASK:> criteria for keeping an object <END_TASK> <USER_TASK:> Description: def select(yerrs, amps, amp_errs, widths): """ criteria for keeping an object """
keep_1 = np.logical_and(amps < 0, widths > 1)
keep_2 = np.logical_and(np.abs(amps) > 3 * yerrs, amp_errs < 3 * np.abs(amps))
keep = np.logical_and(keep_1, keep_2)
return keep
<SYSTEM_TASK:> Load the data that we're using to search for Li-rich giants. <END_TASK> <USER_TASK:> Description: def run_all(): """ Load the data that we're using to search for Li-rich giants. Store it in dataset and model objects. """
DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates == '.directory')[0][0])
dates = np.delete(dates, np.where(dates == 'all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates == 'dr2.lis')[0][0])
for date in dates:
    if glob.glob("*%s*.txt" % date):
        print("%s done" % date)
    else:
        print("running %s" % date)
        run_one_date(date)
<SYSTEM_TASK:> Pull colors from catalog <END_TASK> <USER_TASK:> Description: def get_colors(catalog): """ Pull colors from catalog Parameters ---------- catalog: filename """
print("Get Colors")
a = pyfits.open(catalog)
data = a[1].data
a.close()
all_ids = data['LAMOST_ID_1']
all_ids = np.array([val.strip() for val in all_ids])
# G magnitude
gmag = data['gpmag']
gmag_err = data['e_gpmag']
# R magnitude
rmag = data['rpmag']
rmag_err = data['e_rpmag']
# I magnitude
imag = data['ipmag']
imag_err = data['e_ipmag']
# W1 magnitude
W1 = data['W1mag']
W1_err = data['e_W1mag']
# W2 magnitude
W2 = data['W2mag']
W2_err = data['e_W2mag']
# J magnitude
Jmag = data['Jmag']
Jmag_err = data['e_Jmag']
# H magnitude
Hmag = data['Hmag']
Hmag_err = data['e_Hmag']
# K magnitude
Kmag = data['Kmag']
Kmag_err = data['e_Kmag']
# Stack
mag = np.vstack((
    gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1))  # 8, nobj
mag_err = np.vstack((
    gmag_err, rmag_err, imag_err, Jmag_err, Hmag_err, Kmag_err,
    W2_err, W1_err))
# Make g-r, r-i, i-J, etc
col = mag[:-1] - mag[1:]
col_ivar = 1 / (mag_err[:-1]**2 + mag_err[1:]**2)
# There's something wrong with the i-band, I think..so the second color r-i
#bad = col[:,1] < 0.0
#col_ivar[bad] = 0.0
return all_ids, col, col_ivar
<SYSTEM_TASK:> Find and return continuum pixels given the flux and sigma cut <END_TASK> <USER_TASK:> Description: def _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars): """ Find and return continuum pixels given the flux and sigma cut Parameters ---------- f_cut: float the upper limit imposed on the quantity (fbar-1) sig_cut: float the upper limit imposed on the quantity (f_sig) wl: numpy ndarray of length npixels rest-frame wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape nstars, npixels inverse variances, parallel to fluxes Returns ------- contmask: boolean mask of length npixels True indicates that the pixel is continuum """
f_bar = np.median(fluxes, axis=0) sigma_f = np.var(fluxes, axis=0) bad = np.logical_and(f_bar==0, sigma_f==0) cont1 = np.abs(f_bar-1) <= f_cut cont2 = sigma_f <= sig_cut contmask = np.logical_and(cont1, cont2) contmask[bad] = False return contmask
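A small self-contained check of the two cuts, assuming normalized spectra where true continuum pixels scatter tightly around 1 across stars; the pixel contaminated by a line fails the median cut.

import numpy as np

np.random.seed(0)
fluxes = np.ones((50, 5)) + np.random.normal(0, 0.005, (50, 5))
fluxes[:, 2] += 0.3 * np.random.rand(50)     # pixel 2 behaves like a line, not continuum

f_bar = np.median(fluxes, axis=0)
sigma_f = np.var(fluxes, axis=0)
contmask = np.logical_and(np.abs(f_bar - 1) <= 0.01, sigma_f <= 0.001)
print(contmask)                              # pixel 2 is excluded, the others pass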
<SYSTEM_TASK:> Find continuum pix in spec, meeting a set target fraction <END_TASK> <USER_TASK:> Description: def _find_contpix(wl, fluxes, ivars, target_frac): """ Find continuum pix in spec, meeting a set target fraction Parameters ---------- wl: numpy ndarray rest-frame wavelength vector fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes target_frac: float the fraction of pixels in spectrum desired to be continuum Returns ------- contmask: boolean numpy ndarray True corresponds to continuum pixels """
print("Target frac: %s" %(target_frac)) bad1 = np.median(ivars, axis=0) == SMALL bad2 = np.var(ivars, axis=0) == 0 bad = np.logical_and(bad1, bad2) npixels = len(wl)-sum(bad) f_cut = 0.0001 stepsize = 0.0001 sig_cut = 0.0001 contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars) if npixels > 0: frac = sum(contmask)/float(npixels) else: frac = 0 while (frac < target_frac): f_cut += stepsize sig_cut += stepsize contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars) if npixels > 0: frac = sum(contmask)/float(npixels) else: frac = 0 if frac > 0.10*npixels: print("Warning: Over 10% of pixels identified as continuum.") print("%s out of %s pixels identified as continuum" %(sum(contmask), npixels)) print("Cuts: f_cut %s, sig_cut %s" %(f_cut, sig_cut)) return contmask
<SYSTEM_TASK:> Find continuum pix in a spectrum split into chunks <END_TASK> <USER_TASK:> Description: def _find_contpix_regions(wl, fluxes, ivars, frac, ranges): """ Find continuum pix in a spectrum split into chunks Parameters ---------- wl: numpy ndarray rest-frame wavelength vector fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes frac: float fraction of pixels in spectrum to be found as continuum ranges: list, array starts and ends indicating location of chunks in array Returns ------ contmask: numpy ndarray, boolean True indicates continuum pixel """
contmask = np.zeros(len(wl), dtype=bool) for chunk in ranges: start = chunk[0] stop = chunk[1] contmask[start:stop] = _find_contpix( wl[start:stop], fluxes[:,start:stop], ivars[:,start:stop], frac) return contmask
<SYSTEM_TASK:> Load the reference data, and assign each object <END_TASK> <USER_TASK:> Description: def group_data(): """ Load the reference data, and assign each object a random integer from 0 to 7. Save the IDs. """
tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0'] groups = np.random.randint(0, 8, size=len(tr_obj)) np.savez("ref_groups.npz", groups)
<SYSTEM_TASK:> Run the training step, given a dataset object. <END_TASK> <USER_TASK:> Description: def train(ds, ii): """ Run the training step, given a dataset object. """
print("Loading model") m = model.CannonModel(2) print("Training...") m.fit(ds) np.savez("./ex%s_coeffs.npz" %ii, m.coeffs) np.savez("./ex%s_scatters.npz" %ii, m.scatters) np.savez("./ex%s_chisqs.npz" %ii, m.chisqs) np.savez("./ex%s_pivots.npz" %ii, m.pivots) fig = m.diagnostics_leading_coeffs(ds) plt.savefig("ex%s_leading_coeffs.png" %ii) # m.diagnostics_leading_coeffs_triangle(ds) # m.diagnostics_plot_chisq(ds) return m
<SYSTEM_TASK:> Calculate standard deviation weighted by errors <END_TASK> <USER_TASK:> Description: def weighted_std(values, weights): """ Calculate standard deviation weighted by errors """
average = np.average(values, weights=weights) variance = np.average((values-average)**2, weights=weights) return np.sqrt(variance)
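A one-line worked example, assuming the weights are inverse variances so that more precise points dominate the average:

import numpy as np

values  = np.array([1.0, 2.0, 3.0])
weights = np.array([1.0, 1.0, 4.0])                      # last point carries most weight
average  = np.average(values, weights=weights)           # 2.5
variance = np.average((values - average)**2, weights=weights)
print(np.sqrt(variance))                                 # ~0.76, pulled toward the heavily weighted point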
<SYSTEM_TASK:> Estimate the scatter in a region of the spectrum <END_TASK> <USER_TASK:> Description: def estimate_noise(fluxes, contmask): """ Estimate the scatter in a region of the spectrum taken to be continuum """
nstars = fluxes.shape[0] scatter = np.zeros(nstars) for i,spec in enumerate(fluxes): cont = spec[contmask] scatter[i] = stats.funcs.mad_std(cont) return scatter
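Sketch of the per-star scatter estimate on synthetic spectra; mad_std is robust, so an occasional bad pixel inside the continuum mask barely moves the result.

import numpy as np
from astropy import stats

np.random.seed(1)
fluxes = 1.0 + np.random.normal(0, 0.02, (3, 100))       # 3 stars, 100 pixels, true scatter 0.02
fluxes[:, 10] = 5.0                                      # one wild pixel per star
contmask = np.ones(100, dtype=bool)                      # pretend every pixel is continuum
scatter = np.array([stats.funcs.mad_std(spec[contmask]) for spec in fluxes])
print(scatter)                                           # all close to 0.02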
<SYSTEM_TASK:> Pull out wl, flux, ivar from files of training spectra <END_TASK> <USER_TASK:> Description: def load_ref_spectra(): """ Pull out wl, flux, ivar from files of training spectra """
data_dir = "/Users/annaho/Data/AAOmega/ref_spectra" # Load the files & count the number of training objects ff = glob.glob("%s/*.txt" %data_dir) nstars = len(ff) print("We have %s training objects" %nstars) # Read the first file to get the wavelength array f = ff[0] data = Table.read(f, format="ascii.fast_no_header") wl = data['col1'] npix = len(wl) print("We have %s pixels" %npix) tr_flux = np.zeros((nstars,npix)) tr_ivar = np.zeros(tr_flux.shape) for i,f in enumerate(ff): data = Table.read(f, format="ascii.fast_no_header") flux = data['col2'] tr_flux[i,:] = flux sigma = data['col3'] tr_ivar[i,:] = 1.0 / sigma**2 return np.array(ff), wl, tr_flux, tr_ivar
<SYSTEM_TASK:> Use all the above functions to set data up for The Cannon <END_TASK> <USER_TASK:> Description: def load_data(): """ Use all the above functions to set data up for The Cannon """
data_dir = "/Users/annaho/Data/AAOmega" out_dir = "%s/%s" %(data_dir, "Run_13_July") ff, wl, tr_flux, tr_ivar = load_ref_spectra() """ pick one that doesn't have extra dead pixels """ skylines = tr_ivar[4,:] # should be the same across all obj np.savez("%s/skylines.npz" %out_dir, skylines) contmask = np.load("%s/contmask_regions.npz" %data_dir)['arr_0'] scatter = estimate_noise(tr_flux, contmask) ids, labels = load_labels() # Select the objects in the catalog corresponding to the files inds = [] ff_short = [] for fname in ff: val = fname.split("/")[-1] short = (val.split('.')[0] + '.' + val.split('.')[1]) ff_short.append(short) if short in ids: ind = np.where(ids==short)[0][0] inds.append(ind) # choose the labels tr_id = ids[inds] tr_label = labels[inds] # find the corresponding spectra ff_short = np.array(ff_short) inds = np.array([np.where(ff_short==val)[0][0] for val in tr_id]) tr_flux_choose = tr_flux[inds] tr_ivar_choose = tr_ivar[inds] scatter_choose = scatter[inds] np.savez("%s/wl.npz" %out_dir, wl) np.savez("%s/ref_id_all.npz" %out_dir, tr_id) np.savez("%s/ref_flux_all.npz" %out_dir, tr_flux_choose) np.savez("%s/ref_ivar_all.npz" %out_dir, tr_ivar_choose) np.savez("%s/ref_label_all.npz" %out_dir, tr_label) np.savez("%s/ref_spec_scat_all.npz" %out_dir, scatter_choose) # now, the test spectra test_id, test_flux = load_test_spectra() scatter = estimate_noise(test_flux, contmask) np.savez("%s/test_id.npz" %out_dir, test_id) np.savez("%s/test_flux.npz" %out_dir, test_flux) np.savez("%s/test_spec_scat.npz" %out_dir, scatter)
<SYSTEM_TASK:> take the scatters and skylines and make final ivars <END_TASK> <USER_TASK:> Description: def make_full_ivar(): """ take the scatters and skylines and make final ivars """
# skylines come as an ivar # don't use them for now, because I don't really trust them... # skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0'] ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0'] ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0'] test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0'] test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0'] ref_ivar = np.ones(ref_flux.shape) / ref_scat[:,None]**2 test_ivar = np.ones(test_flux.shape) / test_scat[:,None]**2 # ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines) # test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines) ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1) test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1) SMALL = 1.0 / 1000000000.0 ref_ivar[ref_bad] = SMALL test_ivar[test_bad] = SMALL np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar) np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar)
<SYSTEM_TASK:> Return the sinusoid cont func evaluated at input x for the continuum. <END_TASK> <USER_TASK:> Description: def _sinusoid(x, p, L, y): """ Return the sinusoid cont func evaluated at input x for the continuum. Parameters ---------- x: float or np.array data, input to function p: ndarray coefficients of fitting function L: float width of x data y: float or np.array observed data corresponding to input x (not used inside this function; kept so the signature matches the curve-fitting interface) Returns ------- func: float function evaluated for the input x """
N = int(len(p)/2) n = np.linspace(0, N, N+1) k = n*np.pi/L func = 0 for n in range(0, N): func += p[2*n]*np.sin(k[n]*x)+p[2*n+1]*np.cos(k[n]*x) return func
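To make the basis explicit: p is read as alternating sine/cosine coefficients [a_0, b_0, a_1, b_1, ...] with wavenumbers k_n = n*pi/L, so the first pair contributes only the constant b_0 term (sin(0) = 0). A minimal evaluation with toy inputs:

import numpy as np

x = np.linspace(0, 10, 5)
p = np.array([0.0, 1.0, 0.5, 0.0])        # constant 1 plus 0.5*sin(pi*x/L)
L = 10.0
N = int(len(p)/2)
k = np.linspace(0, N, N+1) * np.pi / L
func = 0
for n in range(0, N):
    func += p[2*n]*np.sin(k[n]*x) + p[2*n+1]*np.cos(k[n]*x)
print(func)                               # [1.0, ~1.35, 1.5, ~1.35, 1.0]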
<SYSTEM_TASK:> Calculate a weighted median for values above a particular quantile cut <END_TASK> <USER_TASK:> Description: def _weighted_median(values, weights, quantile): """ Calculate a weighted median for values above a particular quantile cut Used in pseudo continuum normalization Parameters ---------- values: np ndarray of floats the values to take the median of weights: np ndarray of floats the weights associated with the values quantile: float the cut applied to the input data Returns ------ the weighted median """
sindx = np.argsort(values) cvalues = 1. * np.cumsum(weights[sindx]) if cvalues[-1] == 0: # means all the values are 0 return values[0] cvalues = cvalues / cvalues[-1] # div by largest value foo = sindx[cvalues > quantile] if len(foo) == 0: return values[0] indx = foo[0] return values[indx]
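Worked example with toy numbers, assuming inverse-variance weights: the function returns the smallest sorted value whose normalized cumulative weight exceeds the quantile.

import numpy as np

values  = np.array([0.8, 1.0, 1.2, 1.5])
weights = np.array([1.0, 4.0, 4.0, 1.0])
quantile = 0.90
sindx = np.argsort(values)
cvalues = np.cumsum(weights[sindx]) / np.sum(weights)   # [0.1, 0.5, 0.9, 1.0]
print(values[sindx[cvalues > quantile][0]])             # -> 1.5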
<SYSTEM_TASK:> Returns the weighted mean block of spectra <END_TASK> <USER_TASK:> Description: def _find_cont_gaussian_smooth(wl, fluxes, ivars, w): """ Returns the weighted mean block of spectra Parameters ---------- wl: numpy ndarray wavelength vector fluxes: numpy ndarray block of flux values ivars: numpy ndarray block of ivar values, parallel to fluxes w: numpy ndarray Gaussian weight matrix used to compute the weighted mean Returns ------- cont: numpy ndarray block of smoothed flux values, the mean spectra """
print("Finding the continuum") bot = np.dot(ivars, w.T) top = np.dot(fluxes*ivars, w.T) bad = bot == 0 cont = np.zeros(top.shape) cont[~bad] = top[~bad] / bot[~bad] return cont
<SYSTEM_TASK:> Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum <END_TASK> <USER_TASK:> Description: def _cont_norm_gaussian_smooth(dataset, L): """ Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum Parameters ---------- dataset: Dataset the dataset to continuum normalize L: float the width of the Gaussian used for weighting Returns ------- norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar: numpy ndarrays normalized fluxes and rescaled inverse variances for the training and test sets """
print("Gaussian smoothing the entire dataset...") w = gaussian_weight_matrix(dataset.wl, L) print("Gaussian smoothing the training set") cont = _find_cont_gaussian_smooth( dataset.wl, dataset.tr_flux, dataset.tr_ivar, w) norm_tr_flux, norm_tr_ivar = _cont_norm( dataset.tr_flux, dataset.tr_ivar, cont) print("Gaussian smoothing the test set") cont = _find_cont_gaussian_smooth( dataset.wl, dataset.test_flux, dataset.test_ivar, w) norm_test_flux, norm_test_ivar = _cont_norm( dataset.test_flux, dataset.test_ivar, cont) return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar
<SYSTEM_TASK:> Fit a continuum to a continuum pixels in a segment of spectra <END_TASK> <USER_TASK:> Description: def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1): """ Fit a continuum to a continuum pixels in a segment of spectra Functional form can be either sinusoid or chebyshev, with specified degree Parameters ---------- fluxes: numpy ndarray of shape (nstars, npixels) training set or test set pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes contmask: numpy ndarray of length (npixels) boolean pixel mask, True indicates that pixel is continuum deg: int degree of fitting function ffunc: str type of fitting function, chebyshev or sinusoid Returns ------- cont: numpy ndarray of shape (nstars, npixels) the continuum, parallel to fluxes """
nstars = fluxes.shape[0] npixels = fluxes.shape[1] cont = np.zeros(fluxes.shape) if n_proc == 1: for jj in range(nstars): flux = fluxes[jj,:] ivar = ivars[jj,:] pix = np.arange(0, npixels) y = flux[contmask] x = pix[contmask] yivar = ivar[contmask] yivar[yivar == 0] = SMALL**2 if ffunc=="sinusoid": p0 = np.ones(deg*2) # one for cos, one for sin L = max(x)-min(x) pcont_func = _partial_func(_sinusoid, L=L, y=flux) popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0, sigma=1./np.sqrt(yivar)) elif ffunc=="chebyshev": fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg) for element in pix: if ffunc=="sinusoid": cont[jj,element] = _sinusoid(element, popt, L=L, y=flux) elif ffunc=="chebyshev": cont[jj,element] = fit(element) else: # start mp.Pool pool = mp.Pool(processes=n_proc) mp_results = [] for i in xrange(nstars): mp_results.append(pool.apply_async(\ _find_cont_fitfunc, (fluxes[i, :].reshape((1, -1)), ivars[i, :].reshape((1, -1)), contmask[:]), {'deg':deg, 'ffunc':ffunc})) # close mp.Pool pool.close() pool.join() cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)]) return cont
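A standalone check of the chebyshev branch, assuming one noisy spectrum and a fixed continuum mask: fit a low-order Chebyshev to the continuum pixels (weighted by their inverse variances, as above) and evaluate it on every pixel.

import numpy as np

np.random.seed(3)
npixels = 200
pix = np.arange(npixels)
flux = 1.0 + 0.001*pix + np.random.normal(0, 0.01, npixels)   # gently sloping continuum
ivar = np.full(npixels, 1.0/0.01**2)
contmask = (pix % 5 == 0)                                     # pretend every 5th pixel is continuum
fit = np.polynomial.chebyshev.Chebyshev.fit(x=pix[contmask], y=flux[contmask],
                                            w=ivar[contmask], deg=3)
cont = fit(pix)
print(cont.shape)                                             # (200,) smooth continuum estimate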
<SYSTEM_TASK:> Run fit_cont, dealing with spectrum in regions or chunks <END_TASK> <USER_TASK:> Description: def _find_cont_fitfunc_regions(fluxes, ivars, contmask, deg, ranges, ffunc, n_proc=1): """ Run fit_cont, dealing with spectrum in regions or chunks This is useful if a spectrum has gaps. Parameters ---------- fluxes: ndarray of shape (nstars, npixels) training set or test set pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes contmask: numpy ndarray of length (npixels) boolean pixel mask, True indicates that pixel is continuum deg: int degree of fitting function ffunc: str type of fitting function, chebyshev or sinusoid Returns ------- cont: numpy ndarray of shape (nstars, npixels) the continuum, parallel to fluxes """
nstars = fluxes.shape[0] npixels = fluxes.shape[1] cont = np.zeros(fluxes.shape) for chunk in ranges: start = chunk[0] stop = chunk[1] if ffunc=="chebyshev": output = _find_cont_fitfunc(fluxes[:,start:stop], ivars[:,start:stop], contmask[start:stop], deg=deg, ffunc="chebyshev", n_proc=n_proc) elif ffunc=="sinusoid": output = _find_cont_fitfunc(fluxes[:,start:stop], ivars[:,start:stop], contmask[start:stop], deg=deg, ffunc="sinusoid", n_proc=n_proc) cont[:, start:stop] = output return cont
<SYSTEM_TASK:> Estimate the continuum using a running quantile <END_TASK> <USER_TASK:> Description: def _find_cont_running_quantile(wl, fluxes, ivars, q, delta_lambda, verbose=False): """ Estimate the continuum using a running quantile Parameters ---------- wl: numpy ndarray wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes q: float the desired quantile cut delta_lambda: float the half-width of the wavelength window over which the weighted median is calculated Returns ------- cont: numpy ndarray of shape (nstars, npixels) the estimated continuum """
cont = np.zeros(fluxes.shape) nstars = fluxes.shape[0] for jj in range(nstars): if verbose: print("cont_norm_q(): working on star [%s/%s]..." % (jj+1, nstars)) flux = fluxes[jj,:] ivar = ivars[jj,:] for ll, lam in enumerate(wl): indx = (np.where(abs(wl-lam) < delta_lambda))[0] flux_cut = flux[indx] ivar_cut = ivar[indx] cont[jj, ll] = _weighted_median(flux_cut, ivar_cut, q) return cont
<SYSTEM_TASK:> Perform continuum normalization using running quantile, for spectrum <END_TASK> <USER_TASK:> Description: def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda, ranges, verbose=True): """ Perform continuum normalization using running quantile, for spectrum that comes in chunks """
print("contnorm.py: continuum norm using running quantile") print("Taking spectra in %s chunks" % len(ranges)) nstars = fluxes.shape[0] norm_fluxes = np.zeros(fluxes.shape) norm_ivars = np.zeros(ivars.shape) for chunk in ranges: start = chunk[0] stop = chunk[1] output = _cont_norm_running_quantile( wl[start:stop], fluxes[:,start:stop], ivars[:,start:stop], q, delta_lambda) norm_fluxes[:,start:stop] = output[0] norm_ivars[:,start:stop] = output[1] return norm_fluxes, norm_ivars
<SYSTEM_TASK:> Perform continuum normalization using running quantile, for spectrum <END_TASK> <USER_TASK:> Description: def _cont_norm_running_quantile_regions_mp(wl, fluxes, ivars, q, delta_lambda, ranges, n_proc=2, verbose=False): """ Perform continuum normalization using running quantile, for spectrum that comes in chunks. The same as _cont_norm_running_quantile_regions(), but using multi-processing. Bo Zhang (NAOC) """
print("contnorm.py: continuum norm using running quantile") print("Taking spectra in %s chunks" % len(ranges)) # nstars = fluxes.shape[0] nchunks = len(ranges) norm_fluxes = np.zeros(fluxes.shape) norm_ivars = np.zeros(ivars.shape) for i in xrange(nchunks): chunk = ranges[i, :] start = chunk[0] stop = chunk[1] if verbose: print('@Bo Zhang: Going to normalize Chunk [%d/%d], pixel:[%d, %d] ...' % (i+1, nchunks, start, stop)) output = _cont_norm_running_quantile_mp( wl[start:stop], fluxes[:, start:stop], ivars[:, start:stop], q, delta_lambda, n_proc=n_proc, verbose=verbose) norm_fluxes[:, start:stop] = output[0] norm_ivars[:, start:stop] = output[1] return norm_fluxes, norm_ivars
<SYSTEM_TASK:> Continuum-normalize a continuous segment of spectra. <END_TASK> <USER_TASK:> Description: def _cont_norm(fluxes, ivars, cont): """ Continuum-normalize a continuous segment of spectra. Parameters ---------- fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes cont: numpy ndarray the continuum to divide by, parallel to fluxes Returns ------- norm_fluxes: numpy ndarray normalized pixel intensities norm_ivars: numpy ndarray rescaled inverse variances """
bad = cont == 0. norm_fluxes = np.ones(fluxes.shape) norm_fluxes[~bad] = fluxes[~bad] / cont[~bad] norm_ivars = cont**2 * ivars return norm_fluxes, norm_ivars
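In isolation the normalization step is just a division plus an inverse-variance rescaling (var(f/c) = var(f)/c**2, hence ivar scales by c**2); pixels with zero continuum are left at flux 1 and keep ivar 0:

import numpy as np

fluxes = np.array([[2.0, 2.2, 0.0]])
ivars  = np.array([[4.0, 4.0, 0.0]])
cont   = np.array([[2.0, 2.0, 0.0]])      # last pixel has no continuum estimate
bad = cont == 0.
norm_fluxes = np.ones(fluxes.shape)
norm_fluxes[~bad] = fluxes[~bad] / cont[~bad]
norm_ivars = cont**2 * ivars
print(norm_fluxes)                        # [[1.  1.1 1. ]]
print(norm_ivars)                         # [[16. 16.  0.]]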
<SYSTEM_TASK:> Perform continuum normalization for spectra in chunks <END_TASK> <USER_TASK:> Description: def _cont_norm_regions(fluxes, ivars, cont, ranges): """ Perform continuum normalization for spectra in chunks Useful for spectra that have gaps Parameters --------- fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes cont: numpy ndarray the continuum ranges: list or np ndarray the chunks that the spectrum should be split into Returns ------- norm_fluxes: numpy ndarray normalized pixel intensities norm_ivars: numpy ndarray rescaled inverse variances """
nstars = fluxes.shape[0] norm_fluxes = np.zeros(fluxes.shape) norm_ivars = np.zeros(ivars.shape) for chunk in ranges: start = chunk[0] stop = chunk[1] output = _cont_norm(fluxes[:,start:stop], ivars[:,start:stop], cont[:,start:stop]) norm_fluxes[:,start:stop] = output[0] norm_ivars[:,start:stop] = output[1] for jj in range(nstars): bad = (norm_ivars[jj,:] == 0.) norm_fluxes[jj,:][bad] = 1. return norm_fluxes, norm_ivars
<SYSTEM_TASK:> Plot baseline spec with continuum pix overlaid <END_TASK> <USER_TASK:> Description: def plot_contpix(self, x, y, contpix_x, contpix_y, figname): """ Plot baseline spec with continuum pix overlaid Parameters ---------- x: numpy ndarray wavelength vector y: numpy ndarray baseline spectrum (the leading fit coefficients, theta_0) contpix_x: numpy ndarray wavelengths of the continuum pixels contpix_y: numpy ndarray baseline-spectrum values at the continuum pixels figname: str name of the output file for the saved figure """
fig, axarr = plt.subplots(2, sharex=True) plt.xlabel(r"Wavelength $\lambda (\AA)$") plt.xlim(min(x), max(x)) ax = axarr[0] ax.step(x, y, where='mid', c='k', linewidth=0.3, label=r'$\theta_0$' + "= the leading fit coefficient") ax.scatter(contpix_x, contpix_y, s=1, color='r', label="continuum pixels") ax.legend(loc='lower right', prop={'family':'serif', 'size':'small'}) ax.set_title("Baseline Spectrum with Continuum Pixels") ax.set_ylabel(r'$\theta_0$') ax = axarr[1] ax.step(x, y, where='mid', c='k', linewidth=0.3, label=r'$\theta_0$' + "= the leading fit coefficient") ax.scatter(contpix_x, contpix_y, s=1, color='r', label="continuum pixels") ax.set_title("Baseline Spectrum with Continuum Pixels, Zoomed") ax.legend(loc='upper right', prop={'family':'serif', 'size':'small'}) ax.set_ylabel(r'$\theta_0$') ax.set_ylim(0.95, 1.05) print("Diagnostic plot: fitted 0th order spec w/ cont pix") print("Saved as %s.png" % (figname)) plt.savefig(figname) plt.close()
<SYSTEM_TASK:> Call plot_contpix once for each nth of the spectrum <END_TASK> <USER_TASK:> Description: def diagnostics_contpix(self, data, nchunks=10, fig = "baseline_spec_with_cont_pix"): """ Call plot_contpix once for each nth of the spectrum """
if data.contmask is None: print("No contmask set") else: coeffs_all = self.coeffs wl = data.wl baseline_spec = coeffs_all[:,0] contmask = data.contmask contpix_x = wl[contmask] contpix_y = baseline_spec[contmask] rem = len(wl)%nchunks wl_split = np.array(np.split(wl[0:len(wl)-rem],nchunks)) baseline_spec_split = np.array( np.split(baseline_spec[0:len(wl)-rem],nchunks)) nchunks = wl_split.shape[0] for i in range(nchunks): fig_chunk = fig + "_%s" %str(i) wl_chunk = wl_split[i,:] baseline_spec_chunk = baseline_spec_split[i,:] take = np.logical_and( contpix_x>wl_chunk[0], contpix_x<wl_chunk[-1]) self.plot_contpix( wl_chunk, baseline_spec_chunk, contpix_x[take], contpix_y[take], fig_chunk)
<SYSTEM_TASK:> Plot the distribution of chi-squared values of the model fit <END_TASK> <USER_TASK:> Description: def diagnostics_plot_chisq(self, ds, figname = "modelfit_chisqs.png"): """ Plot the distribution of chi-squared values of the model fit Parameters ---------- ds: Dataset the dataset the model was fit to figname: str, optional filename of the output saved plot """
label_names = ds.get_plotting_labels() lams = ds.wl pivots = self.pivots npixels = len(lams) nlabels = len(pivots) chisqs = self.chisqs coeffs = self.coeffs scatters = self.scatters # Histogram of the chi squareds of ind. stars plt.hist(np.sum(chisqs, axis=0), color='lightblue', alpha=0.7, bins=int(np.sqrt(len(chisqs)))) dof = len(lams) - coeffs.shape[1] # for one star plt.axvline(x=dof, c='k', linewidth=2, label="DOF") plt.legend() plt.title("Distribution of " + r"$\chi^2$" + " of the Model Fit") plt.ylabel("Count") plt.xlabel(r"$\chi^2$" + " of Individual Star") print("Diagnostic plot: histogram of the red chi squareds of the fit") print("Saved as %s" %figname) plt.savefig(figname) plt.close()
<SYSTEM_TASK:> avg dist from one lamost point to nearest 10 training points <END_TASK> <USER_TASK:> Description: def calc_dist(lamost_point, training_points, coeffs): """ avg dist from one lamost point to nearest 10 training points """
diff2 = (training_points - lamost_point)**2 dist = np.sqrt(np.sum(diff2*coeffs, axis=1)) return np.mean(dist[dist.argsort()][0:10])
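Toy illustration with made-up numbers: the distance is a per-label weighted Euclidean distance in label space, averaged over the 10 nearest training points.

import numpy as np

np.random.seed(2)
training_points = np.random.normal(0, 1, (100, 4))   # 100 training stars, 4 labels
lamost_point = np.zeros(4)
coeffs = np.array([1.0, 1.0, 0.5, 0.5])              # per-label weights (illustrative)
diff2 = (training_points - lamost_point)**2
dist = np.sqrt(np.sum(diff2 * coeffs, axis=1))
print(np.mean(np.sort(dist)[:10]))                   # mean distance to the 10 nearest neighbours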
<SYSTEM_TASK:> check if a variable is present in the environmental variables <END_TASK> <USER_TASK:> Description: def _check_environ(variable, value): """return the supplied value if it is not None, otherwise fall back to the environmental variable of the given name; stop with an error if neither is set"""
if is_not_none(value): return value else: value = os.environ.get(variable) if is_none(value): stop(''.join([variable, """ not supplied and no entry in environmental variables"""])) else: return value
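The helpers is_not_none, is_none and stop are small pygbif utilities not shown in this excerpt; below is a self-contained sketch of the same fallback behaviour using only the standard library (the function name and example value are hypothetical, for illustration only).

import os

def check_environ_sketch(variable, value):
    # return the explicit value if given, else fall back to the environment
    if value is not None:
        return value
    value = os.environ.get(variable)
    if value is None:
        raise ValueError(variable + " not supplied and no entry in environmental variables")
    return value

os.environ["GBIF_USER"] = "example_user"              # hypothetical value, for illustration only
print(check_environ_sketch("GBIF_USER", None))        # -> "example_user"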
<SYSTEM_TASK:> Spin up a download request for GBIF occurrence data. <END_TASK> <USER_TASK:> Description: def download(queries, user=None, pwd=None, email=None, pred_type='and'): """ Spin up a download request for GBIF occurrence data. :param queries: One or more query arguments to kick off a download job. See Details. :type queries: str or list :param pred_type: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like`` :param user: (character) User name within GBIF's website. Required. Set in your env vars with the option ``GBIF_USER`` :param pwd: (character) User password within GBIF's website. Required. Set in your env vars with the option ``GBIF_PWD`` :param email: (character) Email address to receive the notification email when the download is done. Required. Set in your env vars with the option ``GBIF_EMAIL`` Arguments have to be passed as strings (e.g., ``country = US``), with a space between key (``country``), operator (``=``), and value (``US``). See the ``pred_type`` parameter for possible options for the operator. This character string is parsed internally. Acceptable arguments to ``...`` (args) are: - taxonKey = ``TAXON_KEY`` - scientificName = ``SCIENTIFIC_NAME`` - country = ``COUNTRY`` - publishingCountry = ``PUBLISHING_COUNTRY`` - hasCoordinate = ``HAS_COORDINATE`` - hasGeospatialIssue = ``HAS_GEOSPATIAL_ISSUE`` - typeStatus = ``TYPE_STATUS`` - recordNumber = ``RECORD_NUMBER`` - lastInterpreted = ``LAST_INTERPRETED`` - continent = ``CONTINENT`` - geometry = ``GEOMETRY`` - basisOfRecord = ``BASIS_OF_RECORD`` - datasetKey = ``DATASET_KEY`` - eventDate = ``EVENT_DATE`` - catalogNumber = ``CATALOG_NUMBER`` - year = ``YEAR`` - month = ``MONTH`` - decimalLatitude = ``DECIMAL_LATITUDE`` - decimalLongitude = ``DECIMAL_LONGITUDE`` - elevation = ``ELEVATION`` - depth = ``DEPTH`` - institutionCode = ``INSTITUTION_CODE`` - collectionCode = ``COLLECTION_CODE`` - issue = ``ISSUE`` - mediatype = ``MEDIA_TYPE`` - recordedBy = ``RECORDED_BY`` - repatriated = ``REPATRIATED`` See the API docs http://www.gbif.org/developer/occurrence#download for more info, and the predicates docs http://www.gbif.org/developer/occurrence#predicates GBIF has a limit of 12,000 characters for download queries - so if your download request is really long and complex, consider breaking it up into multiple requests by one factor or another. :return: A dictionary of results, along with the request payload Usage:: from pygbif import occurrences as occ occ.download('basisOfRecord = LITERATURE') occ.download('taxonKey = 3119195') occ.download('decimalLatitude > 50') occ.download('elevation >= 9000') occ.download('decimalLatitude >= 65') occ.download('country = US') occ.download('institutionCode = TLMF') occ.download('catalogNumber = Bird.27847588') res = occ.download(['taxonKey = 7264332', 'hasCoordinate = TRUE']) # pass output to download_meta for more information occ.download_meta(occ.download('decimalLatitude > 75')) # Multiple queries gg = occ.download(['decimalLatitude >= 65', 'decimalLatitude <= -65'], pred_type='or') gg = occ.download(['depth = 80', 'taxonKey = 2343454'], pred_type='or') # Repatriated data for Costa Rica occ.download(['country = CR', 'repatriated = true']) """
user = _check_environ('GBIF_USER', user) pwd = _check_environ('GBIF_PWD', pwd) email = _check_environ('GBIF_EMAIL', email) if isinstance(queries, str): queries = [queries] keyval = [_parse_args(z) for z in queries] # USE GBIFDownload class to set up the predicates req = GbifDownload(user, email) req.main_pred_type = pred_type for predicate in keyval: req.add_predicate(predicate['key'], predicate['value'], predicate['type']) out = req.post_download(user, pwd) return out, req.payload
<SYSTEM_TASK:> Lists the downloads created by a user. <END_TASK> <USER_TASK:> Description: def download_list(user=None, pwd=None, limit=20, offset=0): """ Lists the downloads created by a user. :param user: [str] A user name, look at env var ``GBIF_USER`` first :param pwd: [str] Your password, look at env var ``GBIF_PWD`` first :param limit: [int] Number of records to return. Default: ``20`` :param offset: [int] Record number to start at. Default: ``0`` Usage:: from pygbif import occurrences as occ occ.download_list(user = "sckott") occ.download_list(user = "sckott", limit = 5) occ.download_list(user = "sckott", offset = 21) """
user = _check_environ('GBIF_USER', user) pwd = _check_environ('GBIF_PWD', pwd) url = 'http://api.gbif.org/v1/occurrence/download/user/' + user args = {'limit': limit, 'offset': offset} res = gbif_GET(url, args, auth=(user, pwd)) return {'meta': {'offset': res['offset'], 'limit': res['limit'], 'endofrecords': res['endOfRecords'], 'count': res['count']}, 'results': res['results']}