<SYSTEM_TASK:> Return a JavaRDD of Object by unpickling <END_TASK> <USER_TASK:> Description: def _to_java_object_rdd(rdd): """ Return a JavaRDD of Object by unpickling It will convert each Python object into a Java object via Pyrolite, whether or not the RDD is serialized in batch. """
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
<SYSTEM_TASK:> Return the broadcasted value <END_TASK> <USER_TASK:> Description: def value(self): """ Return the broadcasted value """
if not hasattr(self, "_value") and self._path is not None:
    # we only need to decrypt it here when encryption is enabled and
    # if it's on the driver, since executor decryption is handled already
    if self._sc is not None and self._sc._encryption_enabled:
        port, auth_secret = self._python_broadcast.setupDecryptionServer()
        (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret)
        self._python_broadcast.waitTillBroadcastDataSent()
        return self.load(decrypted_sock_file)
    else:
        self._value = self.load_from_path(self._path)
return self._value
<SYSTEM_TASK:> Delete cached copies of this broadcast on the executors. If the <END_TASK> <USER_TASK:> Description: def unpersist(self, blocking=False): """ Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed """
if self._jbroadcast is None:
    raise Exception("Broadcast can only be unpersisted in driver")
self._jbroadcast.unpersist(blocking)
<SYSTEM_TASK:> Destroy all data and metadata related to this broadcast variable. <END_TASK> <USER_TASK:> Description: def destroy(self, blocking=False): """ Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. """
if self._jbroadcast is None:
    raise Exception("Broadcast can only be destroyed in driver")
self._jbroadcast.destroy(blocking)
os.unlink(self._path)
<SYSTEM_TASK:> Wrap this udf with a function and attach docstring from func <END_TASK> <USER_TASK:> Description: def _wrapped(self): """ Wrap this udf with a function and attach docstring from func """
# It is possible for a callable instance without __name__ attribute and/or
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign them after being wrapped.
assignments = tuple(
    a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')

@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
    return self(*args)

wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
                      else self.func.__class__.__module__)

wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
    self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper
<SYSTEM_TASK:> Register a Java user-defined function as a SQL function. <END_TASK> <USER_TASK:> Description: def registerJavaFunction(self, name, javaClassName, returnType=None): """Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified we would infer it via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)] """
jdt = None
if returnType is not None:
    if not isinstance(returnType, DataType):
        returnType = _parse_datatype_string(returnType)
    jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
<SYSTEM_TASK:> Register a Java user-defined aggregate function as a SQL function. <END_TASK> <USER_TASK:> Description: def registerJavaUDAF(self, name, javaClassName): """Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)] """
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
<SYSTEM_TASK:> Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. <END_TASK> <USER_TASK:> Description: def getOrCreate(cls, checkpointPath, setupFunc): """ Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams """
cls._ensure_initialized()
gw = SparkContext._gateway

# Check whether valid checkpoint information exists in the given path
ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath)
if ssc_option.isEmpty():
    ssc = setupFunc()
    ssc.checkpoint(checkpointPath)
    return ssc

jssc = gw.jvm.JavaStreamingContext(ssc_option.get())

# If there is already an active instance of Python SparkContext use it, or create a new one
if not SparkContext._active_spark_context:
    jsc = jssc.sparkContext()
    conf = SparkConf(_jconf=jsc.getConf())
    SparkContext(conf=conf, gateway=gw, jsc=jsc)

sc = SparkContext._active_spark_context

# update ctx in serializer
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
<SYSTEM_TASK:> Wait for the execution to stop. <END_TASK> <USER_TASK:> Description: def awaitTermination(self, timeout=None): """ Wait for the execution to stop. @param timeout: time to wait in seconds """
if timeout is None:
    self._jssc.awaitTermination()
else:
    self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
<SYSTEM_TASK:> Stop the execution of the streams, with option of ensuring all <END_TASK> <USER_TASK:> Description: def stop(self, stopSparkContext=True, stopGraceFully=False): """ Stop the execution of the streams, with option of ensuring all received data has been processed. @param stopSparkContext: Stop the associated SparkContext or not @param stopGraceFully: Stop gracefully by waiting for the processing of all received data to be completed """
self._jssc.stop(stopSparkContext, stopGraceFully)
StreamingContext._activeContext = None
if stopSparkContext:
    self._sc.stop()
<SYSTEM_TASK:> Create an input stream that monitors a Hadoop-compatible file system <END_TASK> <USER_TASK:> Description: def textFileStream(self, directory): """ Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8. """
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
<SYSTEM_TASK:> Create an input stream that monitors a Hadoop-compatible file system <END_TASK> <USER_TASK:> Description: def binaryRecordsStream(self, directory, recordLength): """ Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as flat binary files with records of fixed length. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. @param directory: Directory to load data from @param recordLength: Length of each record in bytes """
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self, NoOpSerializer())
<SYSTEM_TASK:> Create an input stream from a queue of RDDs or list. In each batch, <END_TASK> <USER_TASK:> Description: def queueStream(self, rdds, oneAtATime=True, default=None): """ Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. @param rdds: Queue of RDDs @param oneAtATime: whether to pick one RDD each time or all of them at once @param default: the default RDD if there are no more RDDs in the queue """
if default and not isinstance(default, RDD):
    default = self._sc.parallelize(default)

if not rdds and default:
    rdds = [rdds]

if rdds and not isinstance(rdds[0], RDD):
    rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)

queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
if default:
    default = default._reserialize(rdds[0]._jrdd_deserializer)
    jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
    jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
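For illustration, a minimal usage sketch of queueStream from a driver program (the `sc` and `ssc` handles and the data are assumptions for this example, not part of the source above):

# Hypothetical driver-side usage: feed three small RDDs through a queue stream.
rdd_queue = [sc.parallelize(range(i * 10, (i + 1) * 10)) for i in range(3)]
stream = ssc.queueStream(rdd_queue, oneAtATime=True)
stream.pprint()  # with oneAtATime=True, each batch consumes one queued RDD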
<SYSTEM_TASK:> Create a new DStream in which each RDD is generated by applying <END_TASK> <USER_TASK:> Description: def transform(self, dstreams, transformFunc): """ Create a new DStream in which each RDD is generated by applying a function on RDDs of the DStreams. The order of the JavaRDDs in the transform function parameter will be the same as the order of corresponding DStreams in the list. """
jdstreams = [d._jdstream for d in dstreams]
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
                         lambda t, *rdds: transformFunc(rdds),
                         *[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
<SYSTEM_TASK:> Create a unified DStream from multiple DStreams of the same <END_TASK> <USER_TASK:> Description: def union(self, *dstreams): """ Create a unified DStream from multiple DStreams of the same type and same slide duration. """
if not dstreams:
    raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
    return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
    raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
    raise ValueError("All DStreams should have same slide duration")
cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
for i in range(0, len(dstreams)):
    jdstreams[i] = dstreams[i]._jdstream
return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
<SYSTEM_TASK:> Loads a data file into a list of `InputFeature`s. <END_TASK> <USER_TASK:> Description: def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
    tokens_a = tokenizer.tokenize(example.text_a)

    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > seq_length - 2:
            tokens_a = tokens_a[0:(seq_length - 2)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1  1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0     0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    input_type_ids = []
    tokens.append("[CLS]")
    input_type_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        input_type_ids.append(0)
    tokens.append("[SEP]")
    input_type_ids.append(0)

    if tokens_b:
        for token in tokens_b:
            tokens.append(token)
            input_type_ids.append(1)
        tokens.append("[SEP]")
        input_type_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < seq_length:
        input_ids.append(0)
        input_mask.append(0)
        input_type_ids.append(0)

    assert len(input_ids) == seq_length
    assert len(input_mask) == seq_length
    assert len(input_type_ids) == seq_length

    if ex_index < 5:
        logger.info("*** Example ***")
        logger.info("unique_id: %s" % (example.unique_id))
        logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logger.info(
            "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

    features.append(
        InputFeatures(
            unique_id=example.unique_id,
            tokens=tokens,
            input_ids=input_ids,
            input_mask=input_mask,
            input_type_ids=input_type_ids))
return features
<SYSTEM_TASK:> Read a list of `InputExample`s from an input file. <END_TASK> <USER_TASK:> Description: def read_examples(input_file): """Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
    while True:
        line = reader.readline()
        if not line:
            break
        line = line.strip()
        text_a = None
        text_b = None
        m = re.match(r"^(.*) \|\|\| (.*)$", line)
        if m is None:
            text_a = line
        else:
            text_a = m.group(1)
            text_b = m.group(2)
        examples.append(
            InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
        unique_id += 1
return examples
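To make the " ||| " convention concrete, a small sketch of an input file and the examples it yields (the file name and contents are invented for illustration):

# Hypothetical input file: one example per line; " ||| " separates a text pair.
with open("input.txt", "w", encoding="utf-8") as f:
    f.write("The cat sat on the mat.\n")
    f.write("Is this jacksonville ? ||| No it is not .\n")

examples = read_examples("input.txt")
assert examples[0].text_b is None               # single-text example
assert examples[1].text_b == "No it is not ."   # paired example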
<SYSTEM_TASK:> Read a SQuAD json file into a list of SquadExample. <END_TASK> <USER_TASK:> Description: def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
    input_data = json.load(reader)["data"]

def is_whitespace(c):
    if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
        return True
    return False

examples = []
for entry in input_data:
    for paragraph in entry["paragraphs"]:
        paragraph_text = paragraph["context"]
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)

        for qa in paragraph["qas"]:
            qas_id = qa["id"]
            question_text = qa["question"]
            start_position = None
            end_position = None
            orig_answer_text = None
            is_impossible = False
            if is_training:
                if version_2_with_negative:
                    is_impossible = qa["is_impossible"]
                if (len(qa["answers"]) != 1) and (not is_impossible):
                    raise ValueError(
                        "For training, each question should have exactly 1 answer.")
                if not is_impossible:
                    answer = qa["answers"][0]
                    orig_answer_text = answer["text"]
                    answer_offset = answer["answer_start"]
                    answer_length = len(orig_answer_text)
                    start_position = char_to_word_offset[answer_offset]
                    end_position = char_to_word_offset[answer_offset + answer_length - 1]
                    # Only add answers where the text can be exactly recovered from the
                    # document. If this CAN'T happen it's likely due to weird Unicode
                    # stuff so we will just skip the example.
                    #
                    # Note that this means for training mode, every example is NOT
                    # guaranteed to be preserved.
                    actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
                    cleaned_answer_text = " ".join(
                        whitespace_tokenize(orig_answer_text))
                    if actual_text.find(cleaned_answer_text) == -1:
                        logger.warning("Could not find answer: '%s' vs. '%s'",
                                       actual_text, cleaned_answer_text)
                        continue
                else:
                    start_position = -1
                    end_position = -1
                    orig_answer_text = ""

            example = SquadExample(
                qas_id=qas_id,
                question_text=question_text,
                doc_tokens=doc_tokens,
                orig_answer_text=orig_answer_text,
                start_position=start_position,
                end_position=end_position,
                is_impossible=is_impossible)
            examples.append(example)
return examples
<SYSTEM_TASK:> Returns tokenized answer spans that better match the annotated answer. <END_TASK> <USER_TASK:> Description: def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
#   Question: What year was John Smith born?
#   Context: The leader was John Smith (1895-1943).
#   Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
#   Question: What country is the top exporter of electronics?
#   Context: The Japanese electronics industry is the largest in the world.
#   Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))

for new_start in range(input_start, input_end + 1):
    for new_end in range(input_end, new_start - 1, -1):
        text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
        if text_span == tok_answer_text:
            return (new_start, new_end)

return (input_start, input_end)
<SYSTEM_TASK:> Check if this is the 'max context' doc span for the token. <END_TASK> <USER_TASK:> Description: def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
#   Doc: the man went to the store and bought a gallon of milk
#   Span A: the man went to the
#   Span B: to the store and bought
#   Span C: and bought a gallon of
#   ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
    end = doc_span.start + doc_span.length - 1
    if position < doc_span.start:
        continue
    if position > end:
        continue
    num_left_context = position - doc_span.start
    num_right_context = end - position
    score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
    if best_score is None or score > best_score:
        best_score = score
        best_span_index = span_index

return cur_span_index == best_span_index
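To check the arithmetic in the comment above, a small worked example (the DocSpan namedtuple below simply mirrors the `doc_span.start`/`doc_span.length` fields the function reads; the values come from the 'bought' example):

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
# 'bought' is token position 7; span B covers tokens 3-7, span C covers 6-10.
doc_spans = [DocSpan(start=3, length=5), DocSpan(start=6, length=5)]
# Span B: min(left=4, right=0) + 0.01 * 5 = 0.05
# Span C: min(left=1, right=3) + 0.01 * 5 = 1.05  -> span C is "max context"
assert _check_is_max_context(doc_spans, 1, 7) is True
assert _check_is_max_context(doc_spans, 0, 7) is False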
<SYSTEM_TASK:> Get the n-best logits from a list. <END_TASK> <USER_TASK:> Description: def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)

best_indexes = []
for i in range(len(index_and_score)):
    if i >= n_best_size:
        break
    best_indexes.append(index_and_score[i][0])
return best_indexes
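A one-line sanity check of the selection order (illustrative logits):

# Indexes of the two largest logits, in descending score order.
assert _get_best_indexes([0.1, 2.3, -0.4, 1.7], 2) == [1, 3]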
<SYSTEM_TASK:> Compute softmax probability over raw logits. <END_TASK> <USER_TASK:> Description: def _compute_softmax(scores): """Compute softmax probability over raw logits."""
if not scores:
    return []

max_score = None
for score in scores:
    if max_score is None or score > max_score:
        max_score = score

exp_scores = []
total_sum = 0.0
for score in scores:
    x = math.exp(score - max_score)
    exp_scores.append(x)
    total_sum += x

probs = []
for score in exp_scores:
    probs.append(score / total_sum)
return probs
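A short sketch of why the max is subtracted first: it prevents overflow in math.exp and leaves the result unchanged, since softmax(x) equals softmax(x - c) for any constant c (values chosen for illustration):

probs = _compute_softmax([1.0, 2.0, 3.0])
shifted = _compute_softmax([1001.0, 1002.0, 1003.0])  # would overflow without the shift
assert probs == shifted
assert abs(sum(probs) - 1.0) < 1e-12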
<SYSTEM_TASK:> Reads a tab separated value file. <END_TASK> <USER_TASK:> Description: def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
    reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
    lines = []
    for line in reader:
        if sys.version_info[0] == 2:
            line = list(unicode(cell, 'utf-8') for cell in line)
        lines.append(line)
    return lines
<SYSTEM_TASK:> Creates examples for the training and dev sets. <END_TASK> <USER_TASK:> Description: def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
    if i == 0:
        continue
    guid = "%s-%s" % (set_type, i)
    text_a = line[3]
    text_b = line[4]
    label = line[0]
    examples.append(
        InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
<SYSTEM_TASK:> Update input and output embeddings with new embedding matrices <END_TASK> <USER_TASK:> Description: def set_num_special_tokens(self, num_special_tokens): """ Update input and output embeddings with new embedding matrices Make sure we are sharing the embeddings """
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
<SYSTEM_TASK:> Converts a sequence of tokens into ids using the vocab. <END_TASK> <USER_TASK:> Description: def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
    ids.append(self.vocab[token])
if len(ids) > self.max_len:
    logger.warning(
        "Token indices sequence length is longer than the specified maximum "
        " sequence length for this BERT model ({} > {}). Running this"
        " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
    )
return ids
<SYSTEM_TASK:> Converts a sequence of ids in wordpiece tokens using the vocab. <END_TASK> <USER_TASK:> Description: def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
    tokens.append(self.ids_to_tokens[i])
return tokens
<SYSTEM_TASK:> Save the tokenizer vocabulary to a directory or file. <END_TASK> <USER_TASK:> Description: def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
    vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
    for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
        if index != token_index:
            logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                           " Please check that the vocabulary is not corrupted!".format(vocab_file))
            index = token_index
        writer.write(token + u'\n')
        index += 1
return vocab_file
<SYSTEM_TASK:> Instantiate a PreTrainedBertModel from a pre-trained model file. <END_TASK> <USER_TASK:> Description: def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
    vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
    if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
        logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                       "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                       "you may want to check this behavior.")
        kwargs['do_lower_case'] = False
    elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
        logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                       "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                       "but you may want to check this behavior.")
        kwargs['do_lower_case'] = True
else:
    vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
    vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
    resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
    logger.error(
        "Model name '{}' was not found in model name list ({}). "
        "We assumed '{}' was a path or url but couldn't find any file "
        "associated to this path or url.".format(
            pretrained_model_name_or_path,
            ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
            vocab_file))
    return None
if resolved_vocab_file == vocab_file:
    logger.info("loading vocabulary file {}".format(vocab_file))
else:
    logger.info("loading vocabulary file {} from cache at {}".format(
        vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
    # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
    # than the number of positional embeddings
    max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
    kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
<SYSTEM_TASK:> Strips accents from a piece of text. <END_TASK> <USER_TASK:> Description: def _run_strip_accents(self, text): """Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
    cat = unicodedata.category(char)
    if cat == "Mn":
        continue
    output.append(char)
return "".join(output)
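For illustration, assuming an instance of this tokenizer class is in scope as `tokenizer`: NFD decomposition splits an accented character into its base letter plus a combining mark (Unicode category "Mn"), and the loop drops the mark:

# 'é' becomes 'e' + U+0301 (combining acute); the combining mark is removed.
assert tokenizer._run_strip_accents(u"déjà vu") == u"deja vu"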
<SYSTEM_TASK:> Adds whitespace around any CJK character. <END_TASK> <USER_TASK:> Description: def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character."""
output = []
for char in text:
    cp = ord(char)
    if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
    else:
        output.append(char)
return "".join(output)
<SYSTEM_TASK:> Checks whether CP is the codepoint of a CJK character. <END_TASK> <USER_TASK:> Description: def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
#   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or
        (cp >= 0x3400 and cp <= 0x4DBF) or
        (cp >= 0x20000 and cp <= 0x2A6DF) or
        (cp >= 0x2A700 and cp <= 0x2B73F) or
        (cp >= 0x2B740 and cp <= 0x2B81F) or
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or
        (cp >= 0x2F800 and cp <= 0x2FA1F)):
    return True

return False
<SYSTEM_TASK:> Gets the next line of random_file and starts over when reaching the end of the file <END_TASK> <USER_TASK:> Description: def get_next_line(self): """ Gets the next line of random_file and starts over when reaching the end of the file """
try:
    line = next(self.random_file).strip()
    # keep track of which document we are currently looking at to later avoid having the same doc as t1
    if line == "":
        self.current_random_doc = self.current_random_doc + 1
        line = next(self.random_file).strip()
except StopIteration:
    self.random_file.close()
    self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
    line = next(self.random_file).strip()
return line
<SYSTEM_TASK:> Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but <END_TASK> <USER_TASK:> Description: def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list): """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but with several refactors to clean it up and remove a lot of unnecessary variables."""
cand_indices = []
for (i, token) in enumerate(tokens):
    if token == "[CLS]" or token == "[SEP]":
        continue
    cand_indices.append(i)

num_to_mask = min(max_predictions_per_seq,
                  max(1, int(round(len(tokens) * masked_lm_prob))))
shuffle(cand_indices)
mask_indices = sorted(sample(cand_indices, num_to_mask))
masked_token_labels = []
for index in mask_indices:
    # 80% of the time, replace with [MASK]
    if random() < 0.8:
        masked_token = "[MASK]"
    else:
        # 10% of the time, keep original
        if random() < 0.5:
            masked_token = tokens[index]
        # 10% of the time, replace with random word
        else:
            masked_token = choice(vocab_list)
    masked_token_labels.append(tokens[index])
    # Once we've saved the true label for that token, we can overwrite it with the masked version
    tokens[index] = masked_token

return tokens, mask_indices, masked_token_labels
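A minimal usage sketch with a toy sequence and vocabulary (both invented for illustration; the real caller passes WordPiece tokens and the full vocab list):

tokens = ["[CLS]", "the", "dog", "is", "hairy", ".", "[SEP]"]
vocab_list = ["the", "dog", "cat", "is", "hairy", "."]
tokens, mask_indices, labels = create_masked_lm_predictions(
    tokens, masked_lm_prob=0.15, max_predictions_per_seq=2, vocab_list=vocab_list)
# labels[i] holds the original token at mask_indices[i]; [CLS]/[SEP] are
# never masking candidates, and here round(7 * 0.15) == 1 token is masked.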
<SYSTEM_TASK:> A map of modules from TF to PyTorch. <END_TASK> <USER_TASK:> Description: def build_tf_to_pytorch_map(model, config): """ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """
tf_to_pt_map = {}

if hasattr(model, 'transformer'):
    # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
    tf_to_pt_map.update({
        "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
        "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})

    for i, (out_l, proj_l, tie_proj) in enumerate(zip(
            model.crit.out_layers, model.crit.out_projs, config.tie_projs)):
        layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
        if config.tie_weight:
            tf_to_pt_map.update({
                layer_str + 'b': out_l.bias})
        else:
            raise NotImplementedError
            # I don't think this is implemented in the TF code
            tf_to_pt_map.update({
                layer_str + 'lookup_table': out_l.weight,
                layer_str + 'b': out_l.bias})
        if not tie_proj:
            tf_to_pt_map.update({
                layer_str + 'proj': proj_l})

    # Now load the rest of the transformer
    model = model.transformer

# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
    layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
    tf_to_pt_map.update({
        layer_str + 'lookup_table': embed_l.weight,
        layer_str + 'proj_W': proj_l})

# Transformer blocks
for i, b in enumerate(model.layers):
    layer_str = "transformer/layer_%d/" % i
    tf_to_pt_map.update({
        layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
        layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
        layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
        layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
        layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
        layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
        layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
        layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
        layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
        layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
        layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
    })

# Relative positioning biases
if config.untie_r:
    r_r_list = []
    r_w_list = []
    for b in model.layers:
        r_r_list.append(b.dec_attn.r_r_bias)
        r_w_list.append(b.dec_attn.r_w_bias)
else:
    r_r_list = [model.r_r_bias]
    r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
    'transformer/r_r_bias': r_r_list,
    'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
<SYSTEM_TASK:> Return DateOffset object from string or tuple representation <END_TASK> <USER_TASK:> Description: def to_offset(freq): """ Return DateOffset object from string or tuple representation or datetime.timedelta object Parameters ---------- freq : str, tuple, datetime.timedelta, DateOffset or None Returns ------- DateOffset None if freq is None. Raises ------ ValueError If freq is an invalid frequency See Also -------- DateOffset Examples -------- >>> to_offset('5min') <5 * Minutes> >>> to_offset('1D1H') <25 * Hours> >>> to_offset(('W', 2)) <2 * Weeks: weekday=6> >>> to_offset((2, 'B')) <2 * BusinessDays> >>> to_offset(datetime.timedelta(days=1)) <Day> >>> to_offset(Hour()) <Hour> """
if freq is None:
    return None

if isinstance(freq, DateOffset):
    return freq

if isinstance(freq, tuple):
    name = freq[0]
    stride = freq[1]
    if isinstance(stride, str):
        name, stride = stride, name
    name, _ = libfreqs._base_and_stride(name)
    delta = get_offset(name) * stride

elif isinstance(freq, timedelta):
    delta = None
    freq = Timedelta(freq)
    try:
        for name in freq.components._fields:
            offset = _name_to_offset_map[name]
            stride = getattr(freq.components, name)
            if stride != 0:
                offset = stride * offset
                if delta is None:
                    delta = offset
                else:
                    delta = delta + offset
    except Exception:
        raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))

else:
    delta = None
    stride_sign = None
    try:
        splitted = re.split(libfreqs.opattern, freq)
        if splitted[-1] != '' and not splitted[-1].isspace():
            # the last element must be blank
            raise ValueError('last element must be blank')
        for sep, stride, name in zip(splitted[0::4], splitted[1::4],
                                     splitted[2::4]):
            if sep != '' and not sep.isspace():
                raise ValueError('separator must be spaces')
            prefix = libfreqs._lite_rule_alias.get(name) or name
            if stride_sign is None:
                stride_sign = -1 if stride.startswith('-') else 1
            if not stride:
                stride = 1
            if prefix in Resolution._reso_str_bump_map.keys():
                stride, name = Resolution.get_stride_from_decimal(
                    float(stride), prefix
                )
            stride = int(stride)
            offset = get_offset(name)
            offset = offset * int(np.fabs(stride) * stride_sign)
            if delta is None:
                delta = offset
            else:
                delta = delta + offset
    except Exception:
        raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))

if delta is None:
    raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq))

return delta
<SYSTEM_TASK:> Return DateOffset object associated with rule name <END_TASK> <USER_TASK:> Description: def get_offset(name): """ Return DateOffset object associated with rule name Examples -------- get_offset('EOM') --> BMonthEnd(1) """
if name not in libfreqs._dont_uppercase:
    name = name.upper()
    name = libfreqs._lite_rule_alias.get(name, name)
    name = libfreqs._lite_rule_alias.get(name.lower(), name)
else:
    name = libfreqs._lite_rule_alias.get(name, name)

if name not in _offset_map:
    try:
        split = name.split('-')
        klass = prefix_mapping[split[0]]
        # handles case where there's no suffix (and will TypeError if too
        # many '-')
        offset = klass._from_name(*split[1:])
    except (ValueError, TypeError, KeyError):
        # bad prefix or suffix
        raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name))
    # cache
    _offset_map[name] = offset

return _offset_map[name]
<SYSTEM_TASK:> Infer the most likely frequency given the input index. If the frequency is <END_TASK> <USER_TASK:> Description: def infer_freq(index, warn=True): """ Infer the most likely frequency given the input index. If the frequency is uncertain, a warning will be printed. Parameters ---------- index : DatetimeIndex or TimedeltaIndex If passed a Series, will use the values of the series (NOT THE INDEX). warn : boolean, default True Returns ------- str or None None if no discernible frequency. TypeError if the index is not datetime-like. ValueError if there are fewer than three values. """
import pandas as pd

if isinstance(index, ABCSeries):
    values = index._values
    if not (is_datetime64_dtype(values) or
            is_timedelta64_dtype(values) or
            values.dtype == object):
        raise TypeError("cannot infer freq from a non-convertible dtype "
                        "on a Series of {dtype}".format(dtype=index.dtype))
    index = values

if is_period_arraylike(index):
    raise TypeError("PeriodIndex given. Check the `freq` attribute "
                    "instead of using infer_freq.")
elif is_timedelta64_dtype(index):
    # Allow TimedeltaIndex and TimedeltaArray
    inferer = _TimedeltaFrequencyInferer(index, warn=warn)
    return inferer.get_freq()

if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
    if isinstance(index, (pd.Int64Index, pd.Float64Index)):
        raise TypeError("cannot infer freq from a non-convertible index "
                        "type {type}".format(type=type(index)))
    index = index.values

if not isinstance(index, pd.DatetimeIndex):
    try:
        index = pd.DatetimeIndex(index)
    except AmbiguousTimeError:
        index = pd.DatetimeIndex(index.asi8)

inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
<SYSTEM_TASK:> Find the appropriate frequency string to describe the inferred <END_TASK> <USER_TASK:> Description: def get_freq(self): """ Find the appropriate frequency string to describe the inferred frequency of self.values Returns ------- str or None """
if not self.is_monotonic or not self.index._is_unique:
    return None

delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
    return self._infer_daily_rule()

# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
    return 'BH'
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
elif not self.is_unique_asi8:
    return None

delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
    # Hours
    return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
    # Minutes
    return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
    # Seconds
    return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
    # Milliseconds
    return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
    # Microseconds
    return _maybe_add_count('U', delta / _ONE_MICRO)
else:
    # Nanoseconds
    return _maybe_add_count('N', delta)
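For context, a sketch of the unit constants and the divisibility helper this cascade relies on (in pandas these live in the same module; reproduced here under that assumption, with everything expressed in nanoseconds):

_ONE_MICRO = 1000  # nanoseconds per microsecond
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR

def _is_multiple(us, mult):
    # a delta matches a unit when it divides evenly into that unit
    return us % mult == 0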
<SYSTEM_TASK:> load a pickle, with a provided encoding <END_TASK> <USER_TASK:> Description: def load(fh, encoding=None, is_verbose=False): """load a pickle, with a provided encoding if compat is True: fake the old class hierarchy if it works, then return the new type objects Parameters ---------- fh : a filelike object encoding : an optional encoding is_verbose : show exception output """
try:
    fh.seek(0)
    if encoding is not None:
        up = Unpickler(fh, encoding=encoding)
    else:
        up = Unpickler(fh)
    up.is_verbose = is_verbose
    return up.load()
except (ValueError, TypeError):
    raise
<SYSTEM_TASK:> Construct an index from sequences of data. <END_TASK> <USER_TASK:> Description: def ensure_index_from_sequences(sequences, names=None): """ Construct an index from sequences of data. A single sequence returns an Index. Many sequences returns a MultiIndex. Parameters ---------- sequences : sequence of sequences names : sequence of str Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index_from_sequences([[1, 2, 3]], names=['name']) Int64Index([1, 2, 3], dtype='int64', name='name') >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also -------- ensure_index """
from .multi import MultiIndex

if len(sequences) == 1:
    if names is not None:
        names = names[0]
    return Index(sequences[0], name=names)
else:
    return MultiIndex.from_arrays(sequences, names=names)
<SYSTEM_TASK:> Ensure that we have an index from some index-like object. <END_TASK> <USER_TASK:> Description: def ensure_index(index_like, copy=False): """ Ensure that we have an index from some index-like object. Parameters ---------- index : sequence An Index or other sequence copy : bool Returns ------- index : Index or MultiIndex Examples -------- >>> ensure_index(['a', 'b']) Index(['a', 'b'], dtype='object') >>> ensure_index([('a', 'a'), ('b', 'c')]) Index([('a', 'a'), ('b', 'c')], dtype='object') >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], codes=[[0, 0], [0, 1]]) See Also -------- ensure_index_from_sequences """
if isinstance(index_like, Index):
    if copy:
        index_like = index_like.copy()
    return index_like
if hasattr(index_like, 'name'):
    return Index(index_like, name=index_like.name, copy=copy)

if is_iterator(index_like):
    index_like = list(index_like)

# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
    if type(index_like) != list:
        index_like = list(index_like)

    converted, all_arrays = lib.clean_index_list(index_like)

    if len(converted) > 0 and all_arrays:
        from .multi import MultiIndex
        return MultiIndex.from_arrays(converted)
    else:
        index_like = converted
else:
    # clean_index_list does the equivalent of copying
    # so only need to do this if not list instance
    if copy:
        from copy import copy
        index_like = copy(index_like)

return Index(index_like)
<SYSTEM_TASK:> We require that we have a dtype compat for the values. If we are passed <END_TASK> <USER_TASK:> Description: def _simple_new(cls, values, name=None, dtype=None, **kwargs): """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """
if not hasattr(values, 'dtype'):
    if (values is None or not len(values)) and dtype is not None:
        values = np.empty(0, dtype=dtype)
    else:
        values = np.array(values, copy=False)
        if is_object_dtype(values):
            values = cls(values, name=name, dtype=dtype,
                         **kwargs)._ndarray_values

if isinstance(values, (ABCSeries, ABCIndexClass)):
    # Index._data must always be an ndarray.
    # This is no-copy for when _values is an ndarray,
    # which should be always at this point.
    values = np.asarray(values._values)

result = object.__new__(cls)
result._data = values
# _index_data is a (temporary?) fix to ensure that the direct data
# manipulation we do in `_libs/reduction.pyx` continues to work.
# We need access to the actual ndarray, since we're messing with
# data buffers and strides. We don't re-use `_ndarray_values`, since
# we actually set this value too.
result._index_data = values
result.name = name
for k, v in kwargs.items():
    setattr(result, k, v)
return result._reset_identity()
<SYSTEM_TASK:> Create a new Index inferring the class with passed value, don't copy <END_TASK> <USER_TASK:> Description: def _shallow_copy_with_infer(self, values, **kwargs): """ Create a new Index inferring the class with passed value, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional kwargs : updates the default attributes for this Index """
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
    attributes['dtype'] = self.dtype
if self._infer_as_myclass:
    try:
        return self._constructor(values, **attributes)
    except (TypeError, ValueError):
        pass
return Index(values, **attributes)
<SYSTEM_TASK:> More flexible, faster check like ``is`` but that works through views. <END_TASK> <USER_TASK:> Description: def is_(self, other): """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object other object to compare against. Returns ------- True if both have same underlying data, False otherwise : bool """
# use something other than None to be clearer
return self._id is getattr(
    other, '_id', Ellipsis) and self._id is not None
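For illustration, a small sketch of the view-vs-copy distinction (as I understand the behavior: `view()` propagates the internal `_id`, while `copy()` gets a fresh one):

import pandas as pd

idx = pd.Index([1, 2, 3])
assert idx.is_(idx.view())       # a view shares the underlying data
assert not idx.is_(idx.copy())   # a copy does not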
<SYSTEM_TASK:> Internal method to handle NA filling of take. <END_TASK> <USER_TASK:> Description: def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan): """ Internal method to handle NA filling of take. """
indices = ensure_platform_int(indices)

# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
    if (indices < -1).any():
        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        raise ValueError(msg)
    taken = algos.take(values,
                       indices,
                       allow_fill=allow_fill,
                       fill_value=na_value)
else:
    taken = values.take(indices)
return taken
<SYSTEM_TASK:> Return the formatted data as a unicode string. <END_TASK> <USER_TASK:> Description: def _format_data(self, name=None): """ Return the formatted data as a unicode string. """
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
                  (self.inferred_type == 'categorical' and
                   is_object_dtype(self.categories)))

return format_object_summary(self, self._formatter_func,
                             is_justify=is_justify, name=name)
<SYSTEM_TASK:> Render a string representation of the Index. <END_TASK> <USER_TASK:> Description: def format(self, name=False, formatter=None, **kwargs): """ Render a string representation of the Index. """
header = []
if name:
    header.append(pprint_thing(self.name,
                               escape_chars=('\t', '\r', '\n')) if
                  self.name is not None else '')

if formatter is not None:
    return header + list(self.map(formatter))

return self._format_with_header(header, **kwargs)
<SYSTEM_TASK:> Format specified values of `self` and return them. <END_TASK> <USER_TASK:> Description: def to_native_types(self, slicer=None, **kwargs): """ Format specified values of `self` and return them. Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values """
values = self
if slicer is not None:
    values = values[slicer]
return values._format_native_types(**kwargs)
<SYSTEM_TASK:> Actually format specific types of the index. <END_TASK> <USER_TASK:> Description: def _format_native_types(self, na_rep='', quoting=None, **kwargs): """ Actually format specific types of the index. """
mask = isna(self)
if not self.is_object() and not quoting:
    values = np.asarray(self).astype(str)
else:
    values = np.array(self, dtype=object, copy=True)

values[mask] = na_rep
return values
<SYSTEM_TASK:> Create a Series with both index and values equal to the index keys <END_TASK> <USER_TASK:> Description: def to_series(self, index=None, name=None): """ Create a Series with both index and values equal to the index keys useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional index of resulting Series. If None, defaults to original index name : string, optional name of resulting Series. If None, defaults to name of original index Returns ------- Series : dtype will be based on the type of the Index values. """
from pandas import Series

if index is None:
    index = self._shallow_copy()
if name is None:
    name = self.name

return Series(self.values.copy(), index=index, name=name)
<SYSTEM_TASK:> Create a DataFrame with a column containing the Index. <END_TASK> <USER_TASK:> Description: def to_frame(self, index=True, name=None): """ Create a DataFrame with a column containing the Index. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original Index. name : object, default None The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """
from pandas import DataFrame
if name is None:
    name = self.name or 0
result = DataFrame({name: self._values.copy()})

if index:
    result.index = self
return result
<SYSTEM_TASK:> Handles the quirks of having a singular 'name' parameter for general <END_TASK> <USER_TASK:> Description: def _validate_names(self, name=None, names=None, deep=False): """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """
from copy import deepcopy
if names is not None and name is not None:
    raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
    return deepcopy(self.names) if deep else self.names
elif names is not None:
    if not is_list_like(names):
        raise TypeError("Must pass list-like as `names`.")
    return names
else:
    if not is_list_like(name):
        return [name]
    return name
<SYSTEM_TASK:> Set Index or MultiIndex name. <END_TASK> <USER_TASK:> Description: def set_names(self, names, level=None, inplace=False): """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label Name(s) to set. level : int, label or list of int or label, optional If the index is a MultiIndex, level(s) to set (None for all levels). Otherwise level must be None. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Int64Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Int64Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]]) >>> idx.set_names(['kind', 'year'], inplace=True) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) """
if level is not None and not isinstance(self, ABCMultiIndex):
    raise ValueError('Level must be None for non-MultiIndex')

if level is not None and not is_list_like(level) and is_list_like(names):
    msg = "Names must be a string when a single level is provided."
    raise TypeError(msg)

if not is_list_like(names) and level is None and self.nlevels > 1:
    raise TypeError("Must pass list-like as `names`.")

if not is_list_like(names):
    names = [names]
if level is not None and not is_list_like(level):
    level = [level]

if inplace:
    idx = self
else:
    idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
    return idx
<SYSTEM_TASK:> Alter Index or MultiIndex name. <END_TASK> <USER_TASK:> Description: def rename(self, name, inplace=False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : boolean, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """
return self.set_names([name], inplace=inplace)
<SYSTEM_TASK:> Validate index level. <END_TASK> <USER_TASK:> Description: def _validate_index_level(self, level): """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. """
if isinstance(level, int):
    if level < 0 and level != -1:
        raise IndexError("Too many levels: Index has only 1 level,"
                         " %d is not a valid level number" % (level, ))
    elif level > 0:
        raise IndexError("Too many levels:"
                         " Index has only 1 level, not %d" % (level + 1))
elif level != self.name:
    raise KeyError('Level %s must be same as name (%s)' %
                   (level, self.name))
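A quick sketch of the accepted and rejected inputs (illustrative values):

import pandas as pd

idx = pd.Index([1, 2, 3], name="x")
idx._validate_index_level(0)      # ok: the only level
idx._validate_index_level("x")    # ok: matches the index name
# idx._validate_index_level(1)    # would raise IndexError
# idx._validate_index_level("y")  # would raise KeyError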
<SYSTEM_TASK:> For internal compatibility with the Index API. <END_TASK> <USER_TASK:> Description: def sortlevel(self, level=None, ascending=True, sort_remaining=None): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : boolean, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """
return self.sort_values(return_indexer=True, ascending=ascending)
<SYSTEM_TASK:> Return if each value is NaN. <END_TASK> <USER_TASK:> Description: def _isnan(self): """ Return if each value is NaN. """
if self._can_hold_na:
    return isna(self)
else:
    # shouldn't reach to this condition by checking hasnans beforehand
    values = np.empty(len(self), dtype=np.bool_)
    values.fill(False)
    return values
<SYSTEM_TASK:> Extract duplicated index elements. <END_TASK> <USER_TASK:> Description: def get_duplicates(self): """ Extract duplicated index elements. .. deprecated:: 0.23.0 Use idx[idx.duplicated()].unique() instead Returns a sorted list of index elements which appear more than once in the index. Returns ------- array-like List of duplicated indexes. See Also -------- Index.duplicated : Return boolean array denoting duplicates. Index.drop_duplicates : Return Index with duplicates removed. Examples -------- Works on different Index of types. >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates() # doctest: +SKIP [2, 3] Note that for a DatetimeIndex, it does not return a list but a new DatetimeIndex: >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03', ... '2018-01-03', '2018-01-04', '2018-01-04'], ... format='%Y-%m-%d') >>> pd.Index(dates).get_duplicates() # doctest: +SKIP DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', freq=None) Sorts duplicated elements even when indexes are unordered. >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates() # doctest: +SKIP [2, 3] Return empty array-like structure when all elements are unique. >>> pd.Index([1, 2, 3, 4]).get_duplicates() # doctest: +SKIP [] >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'], ... format='%Y-%m-%d') >>> pd.Index(dates).get_duplicates() # doctest: +SKIP DatetimeIndex([], dtype='datetime64[ns]', freq=None) """
warnings.warn("'get_duplicates' is deprecated and will be removed in " "a future release. You can use " "idx[idx.duplicated()].unique() instead", FutureWarning, stacklevel=2) return self[self.duplicated()].unique()
<SYSTEM_TASK:> Returns an index containing unique values. <END_TASK> <USER_TASK:> Description: def _get_unique_index(self, dropna=False): """ Returns an index containing unique values. Parameters ---------- dropna : bool If True, NaN values are dropped. Returns ------- uniques : index """
if self.is_unique and not dropna: return self values = self.values if not self.is_unique: values = self.unique() if dropna: try: if self.hasnans: values = values[~isna(values)] except NotImplementedError: pass return self._shallow_copy(values)
<SYSTEM_TASK:> If the result of a set operation will be self, <END_TASK> <USER_TASK:> Description: def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """
name = get_op_result_name(self, other) if self.name != name: return self._shallow_copy(name=name) return self
<SYSTEM_TASK:> Form the union of two Index objects. <END_TASK> <USER_TASK:> Description: def union(self, other, sort=None): """ Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') """
self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_reconciled_name_object(other) if len(self) == 0: return other._get_reconciled_name_object(self) # TODO: is_dtype_union_equal is a hack around # 1. buggy set ops with duplicates (GH #13432) # 2. CategoricalIndex lacking setops (GH #10186) # Once those are fixed, this workaround can be removed if not is_dtype_union_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.union(other, sort=sort) # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self) or is_datetime64tz_dtype(self): lvals = self._ndarray_values else: lvals = self._values if is_period_dtype(other) or is_datetime64tz_dtype(other): rvals = other._ndarray_values else: rvals = other._values if sort is None and self.is_monotonic and other.is_monotonic: try: result = self._outer_indexer(lvals, rvals)[0] except TypeError: # incomparable objects result = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) result.extend([x for x in rvals if x not in value_set]) else: indexer = self.get_indexer(other) indexer, = (indexer == -1).nonzero() if len(indexer) > 0: other_diff = algos.take_nd(rvals, indexer, allow_fill=False) result = _concat._concat_compat((lvals, other_diff)) else: result = lvals if sort is None: try: result = sorting.safe_sort(result) except TypeError as e: warnings.warn("{}, sort order is undefined for " "incomparable objects".format(e), RuntimeWarning, stacklevel=3) # for subclasses return self._wrap_setop_result(other, result)
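A short hedged sketch of the sort=False branch above (the index values are made up and assume a standard pandas import):

import pandas as pd

idx1 = pd.Index([5, 3, 1])
idx2 = pd.Index([2, 1])
# With sort=False the non-monotonic path is taken: values unique to
# idx2 are appended after idx1 in order of appearance, with no sorting.
idx1.union(idx2, sort=False)  # Int64Index([5, 3, 1, 2], dtype='int64')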
<SYSTEM_TASK:> Return a new Index with elements from the index that are not in <END_TASK> <USER_TASK:> Description: def difference(self, other, sort=None): """ Return a new Index with elements from the index that are not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- difference : Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Int64Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Int64Index([2, 1], dtype='int64') """
self._validate_sort_keyword(sort) self._assert_can_do_setop(other) if self.equals(other): # pass an empty np.ndarray with the appropriate dtype return self._shallow_copy(self._data[:0]) other, result_name = self._convert_can_do_setop(other) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff = this.values.take(label_diff) if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: pass return this._shallow_copy(the_diff, name=result_name, freq=None)
<SYSTEM_TASK:> Compute the symmetric difference of two Index objects. <END_TASK> <USER_TASK:> Description: def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- symmetric_difference : Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Int64Index([1, 5], dtype='int64') You can also use the ``^`` operator: >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64') """
self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update this = self._get_unique_index() other = other._get_unique_index() indexer = this.get_indexer(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d(np.arange(this.size), common_indexer, assume_unique=True) left_diff = this.values.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.values.take(right_indexer) the_diff = _concat._concat_compat([left_diff, right_diff]) if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: pass attribs = self._get_attributes_dict() attribs['name'] = result_name if 'freq' in attribs: attribs['freq'] = None return self._shallow_copy_with_infer(the_diff, **attribs)
<SYSTEM_TASK:> Attempt to convert an array of data into an integer index. <END_TASK> <USER_TASK:> Description: def _try_convert_to_int_index(cls, data, copy, name, dtype): """ Attempt to convert an array of data into an integer index. Parameters ---------- data : The data to convert. copy : Whether to copy the data or not. name : The name of the index returned. Returns ------- int_index : data converted to either an Int64Index or a UInt64Index Raises ------ ValueError if the conversion was not successful. """
from .numeric import Int64Index, UInt64Index if not is_unsigned_integer_dtype(dtype): # skip int64 conversion attempt if uint-like dtype is passed, as # this could return Int64Index when UInt64Index is what's desired try: res = data.astype('i8', copy=False) if (res == data).all(): return Int64Index(res, copy=copy, name=name) except (OverflowError, TypeError, ValueError): pass # Conversion to int64 failed (possibly due to overflow) or was skipped, # so let's try now with uint64. try: res = data.astype('u8', copy=False) if (res == data).all(): return UInt64Index(res, copy=copy, name=name) except (OverflowError, TypeError, ValueError): pass raise ValueError
<SYSTEM_TASK:> Coerces data to ndarray. <END_TASK> <USER_TASK:> Description: def _coerce_to_ndarray(cls, data): """ Coerces data to ndarray. Converts other iterables to list first and then to array. Does not touch ndarrays. Raises ------ TypeError When the data passed in is a scalar. """
if not isinstance(data, (np.ndarray, Index)): if data is None or is_scalar(data): cls._scalar_data_error(data) # other iterable of some kind if not isinstance(data, (ABCSeries, list, tuple)): data = list(data) data = np.asarray(data) return data
<SYSTEM_TASK:> We need to coerce a scalar to be compatible with our index type. <END_TASK> <USER_TASK:> Description: def _coerce_scalar_to_index(self, item): """ We need to coerce a scalar to be compatible with our index type. Parameters ---------- item : scalar item to coerce """
dtype = self.dtype if self._is_numeric_dtype and isna(item): # We can't coerce to the numeric dtype of "self" (unless # it's float) if there are NaN values in our output. dtype = None return Index([item], dtype=dtype, **self._get_attributes_dict())
<SYSTEM_TASK:> Check value is valid for scalar op. <END_TASK> <USER_TASK:> Description: def _assert_can_do_op(self, value): """ Check value is valid for scalar op. """
if not is_scalar(value): msg = "'value' must be a scalar, passed: {0}" raise TypeError(msg.format(type(value).__name__))
<SYSTEM_TASK:> Append a collection of Index objects together. <END_TASK> <USER_TASK:> Description: def append(self, other): """ Append a collection of Index objects together. Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index """
to_concat = [self] if isinstance(other, (list, tuple)): to_concat = to_concat + list(other) else: to_concat.append(other) for obj in to_concat: if not isinstance(obj, Index): raise TypeError('all inputs must be Index') names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name)
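A minimal usage sketch for append (values and the name are illustrative):

import pandas as pd

idx = pd.Index([1, 2], name='n')
# Concatenates the indexes; the name survives only because both inputs
# share it -- mixed names would collapse to None.
idx.append(pd.Index([3, 4], name='n'))  # Int64Index([1, 2, 3, 4], dtype='int64', name='n')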
<SYSTEM_TASK:> Return a new Index of the values set with the mask. <END_TASK> <USER_TASK:> Description: def putmask(self, mask, value): """ Return a new Index of the values set with the mask. See Also -------- numpy.ndarray.putmask """
values = self.values.copy() try: np.putmask(values, mask, self._convert_for_op(value)) return self._shallow_copy(values) except (ValueError, TypeError) as err: if is_object_dtype(self): raise err # coerces to object return self.astype(object).putmask(mask, value)
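A hedged example of putmask (values illustrative):

import pandas as pd

idx = pd.Index([1, 2, 3, 4])
# Positions where the mask is True are overwritten with the value;
# a dtype mismatch falls back to an object-dtype copy.
idx.putmask(idx > 2, 0)  # Int64Index([1, 2, 0, 0], dtype='int64')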
<SYSTEM_TASK:> Determine if two Index objects contain the same elements. <END_TASK> <USER_TASK:> Description: def equals(self, other): """ Determine if two Index objects contain the same elements. """
if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self) and not is_object_dtype(other): # if other is not object, use other's logic for coercion return other.equals(self) try: return array_equivalent(com.values_from_object(self), com.values_from_object(other)) except Exception: return False
<SYSTEM_TASK:> Similar to equals, but check that other comparable attributes are <END_TASK> <USER_TASK:> Description: def identical(self, other): """ Similar to equals, but check that other comparable attributes are also equal. """
return (self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) for c in self._comparables)) and type(self) == type(other))
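A quick sketch contrasting equals and identical (values illustrative):

import pandas as pd

a = pd.Index([1, 2], name='left')
b = pd.Index([1, 2], name='right')
a.equals(b)     # True  -- only the elements are compared
a.identical(b)  # False -- attributes such as name must match too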
<SYSTEM_TASK:> Return the label from the index, or, if not present, the previous one. <END_TASK> <USER_TASK:> Description: def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """
try: loc = self.get_loc(label, method='pad') except KeyError: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc]
<SYSTEM_TASK:> Return a sorted copy of the index. <END_TASK> <USER_TASK:> Description: def sort_values(self, return_indexer=False, ascending=True): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """
_as = self.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index
<SYSTEM_TASK:> Return the integer indices that would sort the index. <END_TASK> <USER_TASK:> Description: def argsort(self, *args, **kwargs): """ Return the integer indices that would sort the index. Parameters ---------- *args Passed to `numpy.ndarray.argsort`. **kwargs Passed to `numpy.ndarray.argsort`. Returns ------- numpy.ndarray Integer indices that would sort the index if used as an indexer. See Also -------- numpy.argsort : Similar method for NumPy arrays. Index.sort_values : Return sorted copy of Index. Examples -------- >>> idx = pd.Index(['b', 'a', 'd', 'c']) >>> idx Index(['b', 'a', 'd', 'c'], dtype='object') >>> order = idx.argsort() >>> order array([1, 0, 3, 2]) >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """
result = self.asi8 if result is None: result = np.array(self) return result.argsort(*args, **kwargs)
<SYSTEM_TASK:> Fast lookup of value from 1-dimensional ndarray. Only use this if you <END_TASK> <USER_TASK:> Description: def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing. """
# if we have something that is Index-like, then # use this, e.g. DatetimeIndex # Things like `Series._get_value` (via .at) pass the EA directly here. s = getattr(series, '_values', series) if isinstance(s, (ExtensionArray, Index)) and is_scalar(key): # GH 20882, 21257 # Unify Index and ExtensionArray treatment # First try to convert the key to a location # If that fails, raise a KeyError if an integer # index, otherwise, see if key is an integer, and # try that try: iloc = self.get_loc(key) return s[iloc] except KeyError: if (len(self) > 0 and (self.holds_integer() or self.is_boolean())): raise elif is_integer(key): return s[key] s = com.values_from_object(series) k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: return self._engine.get_value(s, k, tz=getattr(series.dtype, 'tz', None)) except KeyError as e1: if len(self) > 0 and (self.holds_integer() or self.is_boolean()): raise try: return libindex.get_value_box(s, key) except IndexError: raise except TypeError: # generator/iterator-like if is_iterator(key): raise InvalidIndexError(key) else: raise e1 except Exception: # pragma: no cover raise e1 except TypeError: # python 3 if is_scalar(key): # pragma: no cover raise IndexError(key) raise InvalidIndexError(key)
<SYSTEM_TASK:> Fast lookup of value from 1-dimensional ndarray. <END_TASK> <USER_TASK:> Description: def set_value(self, arr, key, value): """ Fast lookup of value from 1-dimensional ndarray. Notes ----- Only use this if you know what you're doing. """
self._engine.set_value(com.values_from_object(arr), com.values_from_object(key), value)
<SYSTEM_TASK:> Guaranteed return of an indexer even when non-unique. <END_TASK> <USER_TASK:> Description: def get_indexer_for(self, target, **kwargs): """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. """
if self.is_unique: return self.get_indexer(target, **kwargs) indexer, _ = self.get_indexer_non_unique(target, **kwargs) return indexer
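An illustrative sketch of the non-unique dispatch (hedged; labels made up):

import pandas as pd

idx = pd.Index(['a', 'b', 'b'])  # non-unique index
# get_indexer would raise here; get_indexer_for falls back to
# get_indexer_non_unique and returns every matching position.
idx.get_indexer_for(['b'])       # array([1, 2])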
<SYSTEM_TASK:> Group the index labels by a given array of values. <END_TASK> <USER_TASK:> Description: def groupby(self, values): """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- groups : dict {group name -> group labels} """
# TODO: if we are a MultiIndex, we can do better # than converting to tuples if isinstance(values, ABCMultiIndex): values = values.values values = ensure_categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return result
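A minimal usage sketch (hedged; the labels and grouping values are made up):

import pandas as pd

idx = pd.Index(['a', 'b', 'c', 'd'])
# Labels are grouped by the parallel array of values.
idx.groupby([1, 1, 2, 2])
# {1: Index(['a', 'b'], dtype='object'), 2: Index(['c', 'd'], dtype='object')}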
<SYSTEM_TASK:> Return a boolean array where the index values are in `values`. <END_TASK> <USER_TASK:> Description: def isin(self, values, level=None): """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. .. versionadded:: 0.18.1 Support for values as a set. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- is_contained : ndarray NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Int64Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']], codes=[[0, 1, 2], [2, 0, 1]], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """
if level is not None: self._validate_index_level(level) return algos.isin(self, values)
<SYSTEM_TASK:> For an ordered or unique index, compute the slice indexer for input <END_TASK> <USER_TASK:> Description: def slice_indexer(self, start=None, end=None, step=None, kind=None): """ For an ordered or unique index, compute the slice indexer for input labels and step. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, default None kind : string, default None Returns ------- indexer : slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples --------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3) """
start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step)
<SYSTEM_TASK:> If we have a float key and are not a floating index, then try to cast <END_TASK> <USER_TASK:> Description: def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """
if is_float(key) and not self.is_floating(): try: ckey = int(key) if ckey == key: key = ckey except (OverflowError, ValueError, TypeError): pass return key
<SYSTEM_TASK:> If we are a positional indexer, validate that the bounds are <END_TASK> <USER_TASK:> Description: def _validate_indexer(self, form, key, kind): """ If we are a positional indexer, validate that the bounds are appropriately typed (each bound must be an integer). """
assert kind in ['ix', 'loc', 'getitem', 'iloc'] if key is None: pass elif is_integer(key): pass elif kind in ['iloc', 'getitem']: self._invalid_indexer(form, key) return key
<SYSTEM_TASK:> Calculate slice bound that corresponds to given label. <END_TASK> <USER_TASK:> Description: def get_slice_bound(self, label, side, kind): """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} """
assert kind in ['ix', 'loc', 'getitem', None] if side not in ('left', 'right'): raise ValueError("Invalid value for side kwarg," " must be either 'left' or 'right': %s" % (side, )) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. label = self._maybe_cast_slice_bound(label, side, kind) # we need to look up the label try: slc = self._get_loc_only_exact_matches(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array or an array of indices, which # is OK as long as they are representable by a slice. if is_bool_dtype(slc): slc = lib.maybe_booleans_to_slice(slc.view('u1')) else: slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self)) if isinstance(slc, np.ndarray): raise KeyError("Cannot get %s slice bound for non-unique " "label: %r" % (side, original_label)) if isinstance(slc, slice): if side == 'left': return slc.start else: return slc.stop else: if side == 'right': return slc + 1 else: return slc
<SYSTEM_TASK:> Compute slice locations for input labels. <END_TASK> <USER_TASK:> Description: def slice_locs(self, start=None, end=None, step=None, kind=None): """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, defaults None If None, defaults to 1 kind : {'ix', 'loc', 'getitem'} or None Returns ------- start, end : int See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples --------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """
inc = (step is None or step >= 0) if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if (isinstance(start, (str, datetime)) and isinstance(end, (str, datetime))): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the " "same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, 'left', kind) if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, 'right', kind) if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice
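A hedged sketch of the reversed-slice handling above (values illustrative):

import pandas as pd

idx = pd.Index(list('abcd'))
# For a negative step the bounds are computed on the swapped labels and
# then shifted so that idx[slice(start, end, step)] walks backwards.
idx.slice_locs(start='d', end='b', step=-1)  # (3, 0)
idx[slice(3, 0, -1)]                         # Index(['d', 'c', 'b'], dtype='object')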
<SYSTEM_TASK:> Make new Index with passed list of labels deleted. <END_TASK> <USER_TASK:> Description: def drop(self, labels, errors='raise'): """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- dropped : Index Raises ------ KeyError If not all of the labels are found in the selected axis """
arr_dtype = 'object' if self.dtype == 'object' else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise KeyError( '{} not found in axis'.format(labels[mask])) indexer = indexer[~mask] return self.delete(indexer)
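A minimal usage sketch (labels illustrative):

import pandas as pd

idx = pd.Index(['a', 'b', 'c'])
idx.drop(['b'])                    # Index(['a', 'c'], dtype='object')
idx.drop(['z'], errors='ignore')   # missing labels are silently skipped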
<SYSTEM_TASK:> Add in comparison methods. <END_TASK> <USER_TASK:> Description: def _add_comparison_methods(cls): """ Add in comparison methods. """
cls.__eq__ = _make_comparison_op(operator.eq, cls) cls.__ne__ = _make_comparison_op(operator.ne, cls) cls.__lt__ = _make_comparison_op(operator.lt, cls) cls.__gt__ = _make_comparison_op(operator.gt, cls) cls.__le__ = _make_comparison_op(operator.le, cls) cls.__ge__ = _make_comparison_op(operator.ge, cls)
<SYSTEM_TASK:> Validate if we can perform a numeric unary operation. <END_TASK> <USER_TASK:> Description: def _validate_for_numeric_unaryop(self, op, opstr): """ Validate if we can perform a numeric unary operation. """
if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " "{opstr} for type: {typ}" .format(opstr=opstr, typ=type(self).__name__))
<SYSTEM_TASK:> Return valid other; evaluate or raise TypeError if we are not of <END_TASK> <USER_TASK:> Description: def _validate_for_numeric_binop(self, other, op): """ Return valid other; evaluate or raise TypeError if we are not of the appropriate type. Notes ----- This is an internal method called by ops. """
opstr = '__{opname}__'.format(opname=op.__name__) # if we are an inheritor of numeric, # but not actually numeric (e.g. DatetimeIndex/PeriodIndex) if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op {opstr} " "for type: {typ}" .format(opstr=opstr, typ=type(self).__name__)) if isinstance(other, Index): if not other._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " "{opstr} with type: {typ}" .format(opstr=opstr, typ=type(other))) elif isinstance(other, np.ndarray) and not other.ndim: other = other.item() if isinstance(other, (Index, ABCSeries, np.ndarray)): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") other = com.values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): # higher up to handle pass elif isinstance(other, (datetime, np.datetime64)): # higher up to handle pass else: if not (is_float(other) or is_integer(other)): raise TypeError("can only perform ops with scalar values") return other
<SYSTEM_TASK:> Given an object and the specifications, set up the internal grouper <END_TASK> <USER_TASK:> Description: def _set_grouper(self, obj, sort=False): """ Given an object and the specifications, set up the internal grouper for this particular specification Parameters ---------- obj : the subject object sort : bool, default False whether the resulting grouper should be sorted """
if self.key is not None and self.level is not None: raise ValueError( "The Grouper cannot specify both a key and a level!") # Keep self.grouper value before overriding if self._grouper is None: self._grouper = self.grouper # the key must be a valid info item if self.key is not None: key = self.key # The 'on' is already defined if (getattr(self.grouper, 'name', None) == key and isinstance(obj, ABCSeries)): ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: raise KeyError( "The grouper name {0} is not found".format(key)) ax = Index(obj[key], name=key) else: ax = obj._get_axis(self.axis) if self.level is not None: level = self.level # if a level is given it must be a mi level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) ax = Index(ax._get_level_values(level), name=ax.names[level]) else: if level not in (0, ax.name): raise ValueError( "The level {0} is not valid".format(level)) # possibly sort if (self.sort or sort) and not ax.is_monotonic: # use stable sort to support first, last, nth indexer = self.indexer = ax.argsort(kind='mergesort') ax = ax.take(indexer) obj = obj._take(indexer, axis=self.axis, is_copy=False) self.obj = obj self.grouper = ax return self.grouper
<SYSTEM_TASK:> Passed off to scipy.interpolate.interp1d. method is scipy's kind. <END_TASK> <USER_TASK:> Description: def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs): """ Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to the list in _clean_interp_method. """
try: from scipy import interpolate # TODO: Why is DatetimeIndex being imported here? from pandas import DatetimeIndex # noqa except ImportError: raise ImportError('{method} interpolation requires SciPy' .format(method=method)) new_x = np.asarray(new_x) # ignores some kwargs that could be passed along. alt_methods = { 'barycentric': interpolate.barycentric_interpolate, 'krogh': interpolate.krogh_interpolate, 'from_derivatives': _from_derivatives, 'piecewise_polynomial': _from_derivatives, } if getattr(x, 'is_all_dates', False): # GH 5975, scipy.interp1d can't handle datetime64s x, new_x = x._values.astype('i8'), new_x.astype('i8') if method == 'pchip': try: alt_methods['pchip'] = interpolate.pchip_interpolate except AttributeError: raise ImportError("Your version of Scipy does not support " "PCHIP interpolation.") elif method == 'akima': try: from scipy.interpolate import Akima1DInterpolator # noqa alt_methods['akima'] = _akima_interpolate except ImportError: raise ImportError("Your version of Scipy does not support " "Akima interpolation.") interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial'] if method in interp1d_methods: if method == 'polynomial': method = order terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': # GH #10633, #24014 if isna(order) or (order <= 0): raise ValueError("order needs to be specified and greater than 0; " "got order: {}".format(order)) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: # GH 7295: need to be able to write for some reason # in some circumstances: check all three if not x.flags.writeable: x = x.copy() if not y.flags.writeable: y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() method = alt_methods[method] new_y = method(x, y, new_x, **kwargs) return new_y
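This wrapper is internal; the usual way to reach it is through Series.interpolate. A hedged sketch (requires SciPy; the data are made up):

import numpy as np
import pandas as pd

s = pd.Series([0.0, np.nan, np.nan, 3.0])
s.interpolate(method='pchip')             # dispatched via alt_methods above
s.interpolate(method='spline', order=2)   # 'spline' requires an explicit order > 0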
<SYSTEM_TASK:> Convenience function for interpolate.BPoly.from_derivatives. <END_TASK> <USER_TASK:> Description: def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): """ Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order: None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Default: True. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array_like The result, of length R or length M or M by R. """
from scipy import interpolate # return the method for compat with scipy version & backwards compat method = interpolate.BPoly.from_derivatives m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) return m(x)
<SYSTEM_TASK:> Perform an actual interpolation of values; values will be made 2-d if <END_TASK> <USER_TASK:> Description: def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None): """ Perform an actual interpolation of values; values will be made 2-d if needed. Fills in place and returns the result. """
transf = (lambda x: x) if axis == 0 else (lambda x: x.T) # reshape a 1 dim if needed ndim = values.ndim if values.ndim == 1: if axis != 0: # pragma: no cover raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0") values = values.reshape(tuple((1,) + values.shape)) if fill_value is None: mask = None else: # todo create faster fill func without masking mask = mask_missing(transf(values), fill_value) method = clean_fill_method(method) if method == 'pad': values = transf(pad_2d( transf(values), limit=limit, mask=mask, dtype=dtype)) else: values = transf(backfill_2d( transf(values), limit=limit, mask=mask, dtype=dtype)) # reshape back if ndim == 1: values = values[0] return values
<SYSTEM_TASK:> Cast values to a dtype that algos.pad and algos.backfill can handle. <END_TASK> <USER_TASK:> Description: def _cast_values_for_fillna(values, dtype): """ Cast values to a dtype that algos.pad and algos.backfill can handle. """
# TODO: for int-dtypes we make a copy, but for everything else this # alters the values in-place. Is this intentional? if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or is_timedelta64_dtype(dtype)): values = values.view(np.int64) elif is_integer_dtype(values): # NB: this check needs to come after the datetime64 check above values = ensure_float64(values) return values
<SYSTEM_TASK:> If this is a reversed op, then flip x,y <END_TASK> <USER_TASK:> Description: def fill_zeros(result, x, y, name, fill): """ If this is a reversed op, then flip x,y If we have an integer value (or array in y) and we have 0's, fill them with the fill, return the result. Mask the nan's from x. """
if fill is None or is_float_dtype(result): return result if name.startswith(('r', '__r')): x, y = y, x is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type')) is_scalar_type = is_scalar(y) if not is_variable_type and not is_scalar_type: return result if is_scalar_type: y = np.array(y) if is_integer_dtype(y): if (y == 0).any(): # GH 7325, mask and nans must be broadcastable (also: PR 9308) # Raveling and then reshaping makes np.putmask faster mask = ((y == 0) & ~np.isnan(result)).ravel() shape = result.shape result = result.astype('float64', copy=False).ravel() np.putmask(result, mask, fill) # if we have a fill of inf, then sign it correctly # (GH 6178 and PR 9308) if np.isinf(fill): signs = y if name.startswith(('r', '__r')) else x signs = np.sign(signs.astype('float', copy=False)) negative_inf_mask = (signs.ravel() < 0) & mask np.putmask(result, negative_inf_mask, -fill) if "floordiv" in name: # (PR 9308) nan_mask = ((y == 0) & (x == 0)).ravel() np.putmask(result, nan_mask, np.nan) result = result.reshape(shape) return result
<SYSTEM_TASK:> Fill nulls caused by division by zero, casting to a different dtype <END_TASK> <USER_TASK:> Description: def dispatch_missing(op, left, right, result): """ Fill nulls caused by division by zero, casting to a different dtype if necessary. Parameters ---------- op : function (operator.add, operator.div, ...) left : object (Index for non-reversed ops) right : object (Index for reversed ops) result : ndarray Returns ------- result : ndarray """
opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__') if op in [operator.truediv, operator.floordiv, getattr(operator, 'div', None)]: result = mask_zero_div_zero(left, right, result) elif op is operator.mod: result = fill_zeros(result, left, right, opstr, np.nan) elif op is divmod: res0 = mask_zero_div_zero(left, right, result[0]) res1 = fill_zeros(result[1], left, right, opstr, np.nan) result = (res0, res1) return result
<SYSTEM_TASK:> Get indexers of values that won't be filled <END_TASK> <USER_TASK:> Description: def _interp_limit(invalid, fw_limit, bw_limit): """ Get indexers of values that won't be filled because they exceed the limits. Parameters ---------- invalid : boolean ndarray fw_limit : int or None forward limit to index bw_limit : int or None backward limit to index Returns ------- set of indexers Notes ----- This is equivalent to the more readable, but slower .. code-block:: python def _interp_limit(invalid, fw_limit, bw_limit): for x in np.where(invalid)[0]: if invalid[max(0, x - fw_limit):x + bw_limit + 1].all(): yield x """
# handle forward first; the backward direction is the same except # 1. operate on the reversed array # 2. subtract the returned indices from N - 1 N = len(invalid) f_idx = set() b_idx = set() def inner(invalid, limit): limit = min(limit, N) windowed = _rolling_window(invalid, limit + 1).all(1) idx = (set(np.where(windowed)[0] + limit) | set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0])) return idx if fw_limit is not None: if fw_limit == 0: f_idx = set(np.where(invalid)[0]) else: f_idx = inner(invalid, fw_limit) if bw_limit is not None: if bw_limit == 0: # then we don't even need to care about backwards # just use forwards return f_idx else: b_idx = list(inner(invalid[::-1], bw_limit)) b_idx = set(N - 1 - np.asarray(b_idx)) if fw_limit == 0: return b_idx return f_idx & b_idx
<SYSTEM_TASK:> Check if we're running in an interactive shell. <END_TASK> <USER_TASK:> Description: def in_interactive_session(): """ Check if we're running in an interactive shell. Returns True if running under a python/ipython interactive shell. """
from pandas import get_option def check_main(): try: import __main__ as main except ModuleNotFoundError: return get_option('mode.sim_interactive') return (not hasattr(main, '__file__') or get_option('mode.sim_interactive')) try: return __IPYTHON__ or check_main() # noqa except NameError: return check_main()
<SYSTEM_TASK:> Code the categories to ensure we can groupby for categoricals. <END_TASK> <USER_TASK:> Description: def recode_for_groupby(c, sort, observed): """ Code the categories to ensure we can groupby for categoricals. If observed=True, we return a new Categorical with the observed categories only. If sort=False, return a copy of self, coded with categories as returned by .unique(), followed by any categories not appearing in the data. If sort=True, return self. This method is needed solely to ensure the categorical index of the GroupBy result has categories in the order of appearance in the data (GH-8868). Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. observed : boolean Account only for the observed values Returns ------- New Categorical If sort=False, the new categories are set to the order of appearance in codes (unless ordered=True, in which case the original order is preserved), followed by any unrepresented categories in the original order. Categorical or None If we are observed, return the original categorical, otherwise None """
# we only care about observed values if observed: unique_codes = unique1d(c.codes) take_codes = unique_codes[unique_codes != -1] if c.ordered: take_codes = np.sort(take_codes) # we recode according to the uniques categories = c.categories.take(take_codes) codes = _recode_for_categories(c.codes, c.categories, categories) # return a new categorical that maps our new codes # and categories dtype = CategoricalDtype(categories, ordered=c.ordered) return Categorical(codes, dtype=dtype, fastpath=True), c # Already sorted according to c.categories; all is fine if sort: return c, None # sort=False should order groups in as-encountered order (GH-8868) cat = c.unique() # But for groupby to work, all categories should be present, # including those missing from the data (GH-13179), which .unique() # above dropped cat = cat.add_categories( c.categories[~c.categories.isin(cat.categories)]) return c.reorder_categories(cat.categories), None
<SYSTEM_TASK:> Write a DataFrame to the parquet format. <END_TASK> <USER_TASK:> Description: def to_parquet(df, path, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the parquet format. Parameters ---------- path : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the engine's default behavior will be used. .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine """
impl = get_engine(engine) return impl.write(df, path, compression=compression, index=index, partition_cols=partition_cols, **kwargs)
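A hedged usage sketch through the DataFrame method, which delegates to this function (the file paths and data are illustrative; pyarrow or fastparquet must be installed):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
df.to_parquet('out.parquet', compression='snappy')
# Partitioned write: 'b' becomes directory levels under dataset_dir.
df.to_parquet('dataset_dir', partition_cols=['b'])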