<SYSTEM_TASK:> Start coverage reporting in kernel. <END_TASK> <USER_TASK:> Description: def setup_coverage(config, kernel, floc, output_loc=None): """Start coverage reporting in kernel. Currently supported kernel languages are: - Python """
language = kernel.language
if language.startswith('python'):
    # Get the pytest-cov coverage object
    cov = get_cov(config)
    if cov:
        # If present, copy the data file location used by pytest-cov
        data_file = os.path.abspath(cov.config.data_file)
    else:
        # Fall back on output_loc and current dir if not
        data_file = os.path.abspath(os.path.join(output_loc or os.getcwd(), '.coverage'))
    # Get options from pytest-cov's command line arguments:
    source = config.option.cov_source
    config_file = config.option.cov_config
    if isinstance(config_file, str) and os.path.isfile(config_file):
        config_file = os.path.abspath(config_file)
    # Copy the suffix of plugin if available
    suffix = _make_suffix(cov)
    if suffix is True:
        # Cannot merge data with autogen suffix, so turn off warning
        # for missing data in pytest-cov collector
        cov._warn_no_data = False
    # Build setup command and execute in kernel:
    cmd = _python_setup % (data_file, source, config_file, suffix)
    msg_id = kernel.kc.execute(cmd, stop_on_error=False)
    kernel.await_idle(msg_id, 60)  # A minute should be plenty to enable coverage
else:
    config.warn(
        'C1',
        'Coverage currently not supported for language "%s".' % language,
        floc)
    return
<SYSTEM_TASK:> Finish coverage reporting in kernel. <END_TASK> <USER_TASK:> Description: def teardown_coverage(config, kernel, output_loc=None): """Finish coverage reporting in kernel. The coverage should previously have been started with setup_coverage. """
language = kernel.language
if language.startswith('python'):
    # Teardown code does not require any input, simply execute:
    msg_id = kernel.kc.execute(_python_teardown)
    kernel.await_idle(msg_id, 60)  # A minute should be plenty to write out coverage
    # Ensure we merge our data into parent data of pytest-cov, if possible
    cov = get_cov(config)
    _merge_nbval_coverage_data(cov)
else:
    # Warnings should be given on setup, or there might be no teardown
    # for a specific language, so do nothing here
    pass
<SYSTEM_TASK:> Returns the coverage object of pytest-cov. <END_TASK> <USER_TASK:> Description: def get_cov(config): """Returns the coverage object of pytest-cov."""
# Check with hasplugin to avoid getplugin exception in older pytest.
if config.pluginmanager.hasplugin('_cov'):
    plugin = config.pluginmanager.getplugin('_cov')
    if plugin.cov_controller:
        return plugin.cov_controller.cov
return None
<SYSTEM_TASK:> Create a suffix for nbval data file depending on pytest-cov config. <END_TASK> <USER_TASK:> Description: def _make_suffix(cov): """Create a suffix for nbval data file depending on pytest-cov config."""
# Check if coverage object has data_suffix:
if cov and cov.data_suffix is not None:
    # If True, the suffix will be autogenerated by coverage.py.
    # The suffixed data files will be automatically combined later.
    if cov.data_suffix is True:
        return True
    # Has a suffix, but we add our own extension
    return cov.data_suffix + '.nbval'
return 'nbval'
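A brief sketch of the three possible outcomes, using a stand-in object with a `data_suffix` attribute (the real argument would be a coverage.py Coverage instance):
from types import SimpleNamespace
_make_suffix(None)                                    # -> 'nbval' (no coverage object)
_make_suffix(SimpleNamespace(data_suffix=True))       # -> True (autogenerated by coverage.py)
_make_suffix(SimpleNamespace(data_suffix='worker1'))  # -> 'worker1.nbval'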
<SYSTEM_TASK:> Put the texts in `items` in an HTML table. <END_TASK> <USER_TASK:> Description: def text2html_table(items:Collection[Collection[str]])->str: "Put the texts in `items` in an HTML table." html_code = f"""<table border="1" class="dataframe">"""
html_code += f""" <thead>\n <tr style="text-align: right;">\n""" for i in items[0]: html_code += f" <th>{_treat_html(i)}</th>" html_code += f" </tr>\n </thead>\n <tbody>" html_code += " <tbody>" for line in items[1:]: html_code += " <tr>" for i in line: html_code += f" <td>{_treat_html(i)}</td>" html_code += " </tr>" html_code += " </tbody>\n</table>" return html_code
<SYSTEM_TASK:> Changes Scikit learn's random forests to give each tree a random sample of <END_TASK> <USER_TASK:> Description: def set_rf_samples(n): """ Changes Scikit learn's random forests to give each tree a random sample of n random rows. """
forest._generate_sample_indices = (lambda rs, n_samples: forest.check_random_state(rs).randint(0, n_samples, n))
<SYSTEM_TASK:> Undoes the changes produced by set_rf_samples. <END_TASK> <USER_TASK:> Description: def reset_rf_samples(): """ Undoes the changes produced by set_rf_samples. """
forest._generate_sample_indices = (lambda rs, n_samples: forest.check_random_state(rs).randint(0, n_samples, n_samples))
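A hedged usage sketch for the patch pair above; `X_train`/`y_train` are hypothetical arrays, and the monkey-patch assumes the old `sklearn.ensemble.forest` module layout these helpers target:
from sklearn.ensemble import RandomForestRegressor
set_rf_samples(20000)                 # each tree now draws 20,000 bootstrapped rows
m = RandomForestRegressor(n_estimators=40, n_jobs=-1)
m.fit(X_train, y_train)               # X_train/y_train are placeholder arrays
reset_rf_samples()                    # restore the default bootstrap of n_samples rows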
<SYSTEM_TASK:> Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual, loss, and probability of <END_TASK> <USER_TASK:> Description: def show_top_losses(self, k:int, max_len:int=70)->None: """ Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual, loss, and probability of actual class. `max_len` is the maximum number of tokens displayed. """
from IPython.display import display, HTML
items = []
tl_val,tl_idx = self.top_losses()
for i,idx in enumerate(tl_idx):
    if k <= 0: break
    k -= 1
    tx,cl = self.data.dl(self.ds_type).dataset[idx]
    cl = cl.data
    classes = self.data.classes
    txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text
    tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}',
           f'{self.losses[idx]:.2f}', f'{self.probs[idx][cl]:.2f}']
    items.append(tmp)
items = np.array(items)
names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability']
df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
with pd.option_context('display.max_colwidth', -1):
    display(HTML(df.to_html(index=False)))
<SYSTEM_TASK:> create new OrderedDict that does not contain `module.` <END_TASK> <USER_TASK:> Description: def remove_module_load(state_dict): """create new OrderedDict that does not contain `module.`"""
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    new_state_dict[k[7:]] = v  # strip the leading 'module.' (7 characters)
return new_state_dict
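A hedged sketch of the typical use: checkpoints saved from an `nn.DataParallel` wrapper prefix every key with `module.`, which this helper strips before loading into the unwrapped model (the filename and `model` are placeholders):
import torch
state = torch.load('model_dataparallel.pth', map_location='cpu')
model.load_state_dict(remove_module_load(state))  # `model` is the bare, unwrapped module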
<SYSTEM_TASK:> Wraps h in new Variables, to detach them from their history. <END_TASK> <USER_TASK:> Description: def repackage_var(h): """Wraps h in new Variables, to detach them from their history."""
if IS_TORCH_04:
    return h.detach() if type(h) == torch.Tensor else tuple(repackage_var(v) for v in h)
else:
    return Variable(h.data) if type(h) == Variable else tuple(repackage_var(v) for v in h)
<SYSTEM_TASK:> Returns a SequentialRNN model. <END_TASK> <USER_TASK:> Description: def get_language_model(n_tok, emb_sz, n_hid, n_layers, pad_token, dropout=0.4, dropouth=0.3, dropouti=0.5, dropoute=0.1, wdrop=0.5, tie_weights=True, qrnn=False, bias=False): """Returns a SequentialRNN model. A RNN_Encoder layer is instantiated using the parameters provided. This is followed by the creation of a LinearDecoder layer. Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder is used to instantiate the weights for the LinearDecoder layer. The SequentialRNN layer is the native torch's Sequential wrapper that puts the RNN_Encoder and LinearDecoder layers sequentially in the model. Args: n_tok (int): number of unique vocabulary words (or tokens) in the source dataset emb_sz (int): the embedding size to use to encode each token n_hid (int): number of hidden activation per LSTM layer n_layers (int): number of LSTM layers to use in the architecture pad_token (int): the int value used for padding text. dropouth (float): dropout to apply to the activations going from one LSTM layer to another dropouti (float): dropout to apply to the input layer. dropoute (float): dropout to apply to the embedding layer. wdrop (float): dropout used for a LSTM's internal (or hidden) recurrent weights. tie_weights (bool): decide if the weights of the embedding matrix in the RNN encoder should be tied to the weights of the LinearDecoder layer. qrnn (bool): decide if the model is composed of LSTMS (False) or QRNNs (True). bias (bool): decide if the decoder should have a bias layer or not. Returns: A SequentialRNN model """
rnn_enc = RNN_Encoder(n_tok, emb_sz, n_hid=n_hid, n_layers=n_layers, pad_token=pad_token,
                      dropouth=dropouth, dropouti=dropouti, dropoute=dropoute, wdrop=wdrop, qrnn=qrnn)
enc = rnn_enc.encoder if tie_weights else None
return SequentialRNN(rnn_enc, LinearDecoder(n_tok, emb_sz, dropout, tie_encoder=enc, bias=bias))
<SYSTEM_TASK:> This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called <END_TASK> <USER_TASK:> Description: def predict_array(self, arr): """ This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions from the model """
precompute = self.precompute
self.precompute = False
pred = super().predict_array(arr)
self.precompute = precompute
return pred
<SYSTEM_TASK:> Plots images given image files. <END_TASK> <USER_TASK:> Description: def plots_from_files(imspaths, figsize=(10,5), rows=1, titles=None, maintitle=None): """Plots images given image files. Arguments: imspaths (list): list of paths figsize (tuple): figure size rows (int): number of rows titles (list): list of titles maintitle (string): main title """
f = plt.figure(figsize=figsize)
if maintitle is not None: plt.suptitle(maintitle, fontsize=16)
for i in range(len(imspaths)):
    sp = f.add_subplot(rows, ceildiv(len(imspaths), rows), i+1)
    sp.axis('Off')
    if titles is not None: sp.set_title(titles[i], fontsize=16)
    img = plt.imread(imspaths[i])
    plt.imshow(img)
<SYSTEM_TASK:> Displays the images and their probabilities of belonging to a certain class <END_TASK> <USER_TASK:> Description: def plot_val_with_title(self, idxs, y): """ Displays the images and their probabilities of belonging to a certain class Arguments: idxs (numpy.ndarray): indexes of the image samples from the dataset y (int): the selected class Returns: Plots the images in n rows [rows = n] """
# if there are any samples to be displayed
if len(idxs) > 0:
    imgs = np.stack([self.ds[x][0] for x in idxs])
    title_probs = [self.probs[x,y] for x in idxs]
    return plots(self.ds.denorm(imgs), rows=1, titles=title_probs)
# if idxs is empty return false
else:
    return False
<SYSTEM_TASK:> Extracts the first 4 most uncertain indexes from the ordered list of probabilities <END_TASK> <USER_TASK:> Description: def most_uncertain_by_mask(self, mask, y): """ Extracts the first 4 most uncertain indexes from the ordered list of probabilities Arguments: mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else y (int): the selected class Returns: idxs (ndarray): An array of indexes of length 4 """
idxs = np.where(mask)[0]
# the most uncertain samples will have abs(probs - 1/num_classes) close to 0
return idxs[np.argsort(np.abs(self.probs[idxs,y]-(1/self.num_classes)))[:4]]
<SYSTEM_TASK:> Perform any of 8 permutations of 90-degrees rotations or flips for image x. <END_TASK> <USER_TASK:> Description: def dihedral(x, dih): """ Perform any of 8 permutations of 90-degrees rotations or flips for image x. """
x = np.rot90(x, dih%4)
return x if dih<4 else np.fliplr(x)
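Since `dih` selects one of 4 rotations, optionally followed by a horizontal flip, the 8 dihedral symmetries can be enumerated directly; a quick sketch on a random HWC image array (np assumed imported as elsewhere):
img = np.random.rand(32, 32, 3)
variants = [dihedral(img, d) for d in range(8)]  # 4 rotations x (flip / no flip)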
<SYSTEM_TASK:> Adjust image balance and contrast <END_TASK> <USER_TASK:> Description: def lighting(im, b, c): """ Adjust image balance and contrast """
if b==0 and c==1: return im
mu = np.average(im)
return np.clip((im-mu)*c+mu+b, 0., 1.).astype(np.float32)
<SYSTEM_TASK:> Convert mask YY to a bounding box, assumes 0 as background, nonzero as object <END_TASK> <USER_TASK:> Description: def to_bb(YY, y="deprecated"): """Convert mask YY to a bounding box, assumes 0 as background, nonzero as object"""
cols,rows = np.nonzero(YY)
if len(cols)==0: return np.zeros(4, dtype=np.float32)
top_row = np.min(rows)
left_col = np.min(cols)
bottom_row = np.max(rows)
right_col = np.max(cols)
return np.array([left_col, top_row, right_col, bottom_row], dtype=np.float32)
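A toy check of the convention above; given NumPy's `nonzero` axis order and the function's own variable naming, the result is `[min over axis 0, min over axis 1, max over axis 0, max over axis 1]`:
mask = np.zeros((5, 5))
mask[1:3, 2:4] = 1
print(to_bb(mask))  # -> [1. 2. 2. 3.]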
<SYSTEM_TASK:> Generate a standard set of transformations <END_TASK> <USER_TASK:> Description: def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None, tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, scale=None): """ Generate a standard set of transformations Arguments --------- normalizer : image normalizing function denorm : image denormalizing function sz : size, sz_y = sz if not specified. tfms : iterable collection of transformation functions max_zoom : float, maximum zoom pad : int, padding on top, left, right and bottom crop_type : crop type tfm_y : y axis specific transformations sz_y : y size, height pad_mode : cv2 padding style: repeat, reflect, etc. Returns ------- type : ``Transforms`` transformer for specified image operations. See Also -------- Transforms: the transformer object returned by this function """
if tfm_y is None: tfm_y=TfmType.NO
if tfms is None: tfms=[]
elif not isinstance(tfms, collections.Iterable): tfms=[tfms]
if sz_y is None: sz_y = sz
if scale is None:
    scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None
             else Scale(sz, tfm_y, sz_y=sz_y)]
elif not is_listy(scale): scale = [scale]
if pad: scale.append(AddPadding(pad, mode=pad_mode))
if crop_type!=CropType.GOOGLENET: tfms=scale+tfms
return Transforms(sz, tfms, normalizer, denorm, crop_type, tfm_y=tfm_y, sz_y=sz_y)
<SYSTEM_TASK:> Given the statistics of the training image sets, returns separate training and validation transform functions <END_TASK> <USER_TASK:> Description: def tfms_from_stats(stats, sz, aug_tfms=None, max_zoom=None, pad=0, crop_type=CropType.RANDOM, tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, norm_y=True, scale=None): """ Given the statistics of the training image sets, returns separate training and validation transform functions """
if aug_tfms is None: aug_tfms=[]
tfm_norm = Normalize(*stats, tfm_y=tfm_y if norm_y else TfmType.NO) if stats is not None else None
tfm_denorm = Denormalize(*stats) if stats is not None else None
val_crop = CropType.CENTER if crop_type in (CropType.RANDOM,CropType.GOOGLENET) else crop_type
val_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=pad, crop_type=val_crop,
                    tfm_y=tfm_y, sz_y=sz_y, scale=scale)
trn_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=pad, crop_type=crop_type,
                    tfm_y=tfm_y, sz_y=sz_y, tfms=aug_tfms, max_zoom=max_zoom,
                    pad_mode=pad_mode, scale=scale)
return trn_tfm, val_tfm
<SYSTEM_TASK:> iterate through all the columns of a dataframe and modify the data type <END_TASK> <USER_TASK:> Description: def reduce_mem_usage(df): """ iterate through all the columns of a dataframe and modify the data type to reduce memory usage. """
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
columns = df.columns
for col in columns:
    col_type = df[col].dtype
    if str(col_type) != 'category' and col_type != 'datetime64[ns]' and col_type != bool:
        if col_type != object:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # np.float16 occasionally caused errors, so float32 is the smallest float used
                if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    print('Error '+col+' Value would be a float64. Disregarding.')
        else:
            df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
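A minimal sketch of the expected effect on a small frame (the exact savings depend on the data):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5]})
df = reduce_mem_usage(df)
print(df.dtypes)  # 'a' is downcast to int8, 'b' to float32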
<SYSTEM_TASK:> Fit the `model` on `data` and learn using `loss_func` and `opt`. <END_TASK> <USER_TASK:> Description: def fit(epochs:int, learn:BasicLearner, callbacks:Optional[CallbackList]=None, metrics:OptMetrics=None)->None: "Fit the `model` on `data` and learn using `loss_func` and `opt`." assert len(learn.data.train_dl) != 0, f"""Your training dataloader is empty, can't train a model. Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements)."""
cb_handler = CallbackHandler(callbacks, metrics)
pbar = master_bar(range(epochs))
cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)
exception=False
try:
    for epoch in pbar:
        learn.model.train()
        cb_handler.set_dl(learn.data.train_dl)
        cb_handler.on_epoch_begin()
        for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
            xb, yb = cb_handler.on_batch_begin(xb, yb)
            loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
            if cb_handler.on_batch_end(loss): break

        if not cb_handler.skip_validate and not learn.data.empty_val:
            val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func,
                                cb_handler=cb_handler, pbar=pbar)
        else: val_loss=None
        if cb_handler.on_epoch_end(val_loss): break
except Exception as e:
    exception = e
    raise
finally:
    cb_handler.on_train_end(exception)
<SYSTEM_TASK:> Takes in text tokens and returns int2tok and tok2int converters <END_TASK> <USER_TASK:> Description: def numericalize_tok(tokens, max_vocab=50000, min_freq=0, unk_tok="_unk_", pad_tok="_pad_", bos_tok="_bos_", eos_tok="_eos_"): """Takes in text tokens and returns int2tok and tok2int converters Arguments: tokens(list): List of tokens. Can be a list of strings, or a list of lists of strings. max_vocab(int): Number of tokens to return in the vocab (sorted by frequency) min_freq(int): Minimum number of instances a token must be present in order to be preserved. unk_tok(str): Token to use when unknown tokens are encountered in the source text. pad_tok(str): Token to use when padding sequences. """
if isinstance(tokens, str):
    raise ValueError("Expected to receive a list of tokens. Received a string instead")
if isinstance(tokens[0], list):
    tokens = [p for o in tokens for p in o]
freq = Counter(tokens)
int2tok = [o for o,c in freq.most_common(max_vocab) if c>min_freq]
unk_id = 3
int2tok.insert(0, bos_tok)
int2tok.insert(1, pad_tok)
int2tok.insert(2, eos_tok)
int2tok.insert(unk_id, unk_tok)
tok2int = collections.defaultdict(lambda:unk_id, {v:k for k,v in enumerate(int2tok)})
return int2tok, tok2int
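A short sketch of the returned mappings; with the default special tokens the first four ids are fixed, and unseen words fall back to the `_unk_` id (3):
itos, stoi = numericalize_tok(['hello', 'world', 'hello'])
print(itos)           # ['_bos_', '_pad_', '_eos_', '_unk_', 'hello', 'world']
print(stoi['hello'])  # 4
print(stoi['unseen']) # 3 (the _unk_ id, via the defaultdict fallback)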
<SYSTEM_TASK:> Start a new kernel, and return its Manager and Client <END_TASK> <USER_TASK:> Description: def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs): """Start a new kernel, and return its Manager and Client"""
logger.debug('Starting new kernel: "%s"' % kernel_name)
km = KernelManager(kernel_name=kernel_name,
                   kernel_spec_manager=NbvalKernelspecManager())
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
    kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
    logger.exception('Failure starting kernel "%s"', kernel_name)
    kc.stop_channels()
    km.shutdown_kernel()
    raise
return km, kc
<SYSTEM_TASK:> Function is used to get a message from the iopub channel. <END_TASK> <USER_TASK:> Description: def get_message(self, stream, timeout=None): """ Get a message from the given stream ('iopub' or 'shell') of the kernel. Timeout is None by default; Empty is raised when the timeout is reached. """
try:
    if stream == 'iopub':
        msg = self.kc.get_iopub_msg(timeout=timeout)
    elif stream == 'shell':
        msg = self.kc.get_shell_msg(timeout=timeout)
    else:
        raise ValueError('Invalid stream specified: "%s"' % stream)
except Empty:
    logger.debug('Kernel: Timeout waiting for message on %s', stream)
    raise
logger.debug("Kernel message (%s):\n%s", stream, pformat(msg))
return msg
<SYSTEM_TASK:> Executes a string of python code in cell input. <END_TASK> <USER_TASK:> Description: def execute_cell_input(self, cell_input, allow_stdin=None): """ Executes a string of python code in cell input. We do not allow the kernel to make requests to stdin, as is the norm for notebooks. Returns a unique message id of the reply from the kernel. """
if cell_input:
    logger.debug('Executing cell: "%s"...', cell_input.splitlines()[0][:40])
else:
    logger.debug('Executing empty cell')
return self.kc.execute(cell_input, allow_stdin=allow_stdin, stop_on_error=False)
<SYSTEM_TASK:> Poll the iopub stream until an idle message is received for the given parent ID <END_TASK> <USER_TASK:> Description: def await_idle(self, parent_id, timeout): """Poll the iopub stream until an idle message is received for the given parent ID"""
while True:
    # Get a message from the kernel iopub channel
    msg = self.get_message(timeout=timeout, stream='iopub')  # raises Empty on timeout!
    if msg['parent_header'].get('msg_id') != parent_id:
        continue
    if msg['msg_type'] == 'status':
        if msg['content']['execution_state'] == 'idle':
            break
<SYSTEM_TASK:> Instructs the kernel process to stop channels <END_TASK> <USER_TASK:> Description: def stop(self): """ Instructs the kernel process to stop channels and the kernel manager to then shutdown the process. """
logger.debug('Stopping kernel')
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
del self.km
<SYSTEM_TASK:> Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ. <END_TASK> <USER_TASK:> Description: def resize_img(fname, targ, path, new_path, fn=None): """ Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ. """
if fn is None: fn = resize_fn(targ)
dest = os.path.join(path_for(path, new_path, targ), fname)
if os.path.exists(dest): return
im = Image.open(os.path.join(path, fname)).convert('RGB')
os.makedirs(os.path.split(dest)[0], exist_ok=True)
fn(im).save(dest)
<SYSTEM_TASK:> Returns a list of relative file paths to `path` for all files within `folder` <END_TASK> <USER_TASK:> Description: def read_dir(path, folder): """ Returns a list of relative file paths to `path` for all files within `folder` """
full_path = os.path.join(path, folder)
fnames = glob(f"{full_path}/*.*")
directories = glob(f"{full_path}/*/")
if any(fnames):
    return [os.path.relpath(f,path) for f in fnames]
elif any(directories):
    raise FileNotFoundError("{} has subdirectories but contains no files. Is your directory structure correct?".format(full_path))
else:
    raise FileNotFoundError("{} folder doesn't exist or is empty".format(full_path))
<SYSTEM_TASK:> Parse filenames and label sets from a CSV file. <END_TASK> <USER_TASK:> Description: def parse_csv_labels(fn, skip_header=True, cat_separator = ' '): """Parse filenames and label sets from a CSV file. This method expects that the csv file at path :fn: has two columns. If it has a header, :skip_header: should be set to True. The labels in the label set are expected to be space separated. Arguments: fn: Path to a CSV file. skip_header: A boolean flag indicating whether to skip the header. Returns: a two-tuple of ( image filenames, a dictionary of filenames and corresponding labels ) . :param cat_separator: the separator for the categories column """
df = pd.read_csv(fn, index_col=0, header=0 if skip_header else None, dtype=str)
fnames = df.index.values
df.iloc[:,0] = df.iloc[:,0].str.split(cat_separator)
return fnames, list(df.to_dict().values())[0]
<SYSTEM_TASK:> Opens an image using OpenCV given the file path. <END_TASK> <USER_TASK:> Description: def open_image(fn): """ Opens an image using OpenCV given the file path. Arguments: fn: the file path of the image Returns: The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0 """
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn) and not str(fn).startswith("http"):
    raise OSError('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn) and not str(fn).startswith("http"):
    raise OSError('Is a directory: {}'.format(fn))
elif isdicom(fn):
    slice = pydicom.read_file(fn)
    if slice.PhotometricInterpretation.startswith('MONOCHROME'):
        # Make a fake RGB image
        im = np.stack([slice.pixel_array]*3, -1)
        return im / ((1 << slice.BitsStored)-1)
    else:
        # No support for RGB yet, as it involves various color spaces.
        # It shouldn't be too difficult to add though, if needed.
        raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(
            slice.PhotometricInterpretation))
else:
    try:
        if str(fn).startswith("http"):
            req = urllib.urlopen(str(fn))
            image = np.asarray(bytearray(req.read()), dtype="uint8")
            im = cv2.imdecode(image, flags).astype(np.float32)/255
        else:
            im = cv2.imread(str(fn), flags).astype(np.float32)/255
        if im is None: raise OSError(f'File not recognized by opencv: {fn}')
        return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    except Exception as e:
        raise OSError('Error handling image at: {}'.format(fn)) from e
<SYSTEM_TASK:> Reverse the normalization done to a batch of images. <END_TASK> <USER_TASK:> Description: def denorm(self,arr): """Reverse the normalization done to a batch of images. Arguments: arr: of shape/size (N,3,sz,sz) """
if type(arr) is not np.ndarray: arr = to_np(arr)
if len(arr.shape)==3: arr = arr[None]
return self.transform.denorm(np.rollaxis(arr,1,4))
<SYSTEM_TASK:> Return a copy of this dataset resized <END_TASK> <USER_TASK:> Description: def resized(self, dl, targ, new_path, resume = True, fn=None): """ Return a copy of this dataset resized """
return dl.dataset.resize_imgs(targ, new_path, resume=resume, fn=fn) if dl else None
<SYSTEM_TASK:> Resizes all the images in the train, valid, test folders to a given size. <END_TASK> <USER_TASK:> Description: def resize(self, targ_sz, new_path='tmp', resume=True, fn=None): """ Resizes all the images in the train, valid, test folders to a given size. Arguments: targ_sz (int): the target size new_path (str): the path to save the resized images (default tmp) resume (bool): if True, check for images in the DataSet that haven't been resized yet (useful if a previous resize operation was aborted) fn (function): optional custom resizing function """
new_ds = []
dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl]
if self.test_dl: dls += [self.test_dl, self.test_aug_dl]
else: dls += [None,None]
t = tqdm_notebook(dls)
for dl in t: new_ds.append(self.resized(dl, targ_sz, new_path, resume, fn))
t.close()
return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
<SYSTEM_TASK:> Read in images and their labels given as numpy arrays <END_TASK> <USER_TASK:> Description: def from_arrays(cls, path, trn, val, bs=64, tfms=(None,None), classes=None, num_workers=4, test=None, continuous=False): """ Read in images and their labels given as numpy arrays Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the shape of `(5000, 784)` and `y` has the shape of `(5000,)`) val: a tuple of validation data matrix and target label/classification array. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` classes: a list of all labels/classifications num_workers: a number of workers test: a matrix of test data (the shape should match `trn[0]`) Returns: ImageClassifierData """
f = ArraysIndexRegressionDataset if continuous else ArraysIndexDataset
datasets = cls.get_ds(f, trn, val, tfms, test=test)
return cls(path, datasets, bs, num_workers, classes=classes)
<SYSTEM_TASK:> Read in images and their labels given as sub-folder names <END_TASK> <USER_TASK:> Description: def from_paths(cls, path, bs=64, tfms=(None,None), trn_name='train', val_name='valid', test_name=None, test_with_labels=False, num_workers=8): """ Read in images and their labels given as sub-folder names Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` trn_name: a name of the folder that contains training images. val_name: a name of the folder that contains validation images. test_name: a name of the folder that contains test images. num_workers: number of workers Returns: ImageClassifierData """
assert not(tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
trn,val = [folder_source(path, o) for o in (trn_name, val_name)]
if test_name:
    test = folder_source(path, test_name) if test_with_labels else read_dir(path, test_name)
else: test = None
datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test)
return cls(path, datasets, bs, num_workers, classes=trn[2])
<SYSTEM_TASK:> Read in images and their labels given as a CSV file. <END_TASK> <USER_TASK:> Description: def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None), val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8, cat_separator=' '): """ Read in images and their labels given as a CSV file. This method should be used when training image labels are given in an CSV file as opposed to sub-directories with label names. Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) folder: a name of the folder in which training images are contained. csv_fname: a name of the CSV file which contains target labels. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`. If None, default arguments to get_cv_idxs are used. suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file extension e.g. '.jpg' - in which case, you can set suffix as '.jpg') test_name: a name of the folder which contains test images. continuous: if True, the data set is used to train regression models. If False, it is used to train classification models. skip_header: skip the first row of the CSV file. num_workers: number of workers cat_separator: Labels category separator Returns: ImageClassifierData """
assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
assert not (os.path.isabs(folder)), "folder needs to be a relative path"
fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix,
                              continuous=continuous, cat_separator=cat_separator)
return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
                                num_workers=num_workers, suffix=suffix, tfms=tfms,
                                bs=bs, continuous=continuous)
<SYSTEM_TASK:> Read in images given a sub-folder and their labels given a numpy array <END_TASK> <USER_TASK:> Description: def from_path_and_array(cls, path, folder, y, classes=None, val_idxs=None, test_name=None, num_workers=8, tfms=(None,None), bs=64): """ Read in images given a sub-folder and their labels given a numpy array Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) folder: a name of the folder in which training images are contained. y: numpy array which contains target labels ordered by filenames. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`. If None, default arguments to get_cv_idxs are used. test_name: a name of the folder which contains test images. num_workers: number of workers Returns: ImageClassifierData """
assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
assert not (os.path.isabs(folder)), "folder needs to be a relative path"
fnames = np.core.defchararray.add(f'{folder}/', sorted(os.listdir(f'{path}{folder}')))
return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
                                num_workers=num_workers, tfms=tfms, bs=bs)
<SYSTEM_TASK:> Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted <END_TASK> <USER_TASK:> Description: def gpu_mem_restore(func): "Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted"
@functools.wraps(func)
def wrapper(*args, **kwargs):
    tb_clear_frames = os.environ.get('FASTAI_TB_CLEAR_FRAMES', None)
    if not IS_IN_IPYTHON or tb_clear_frames=="0": return func(*args, **kwargs)
    try:
        return func(*args, **kwargs)
    except Exception as e:
        if ("CUDA out of memory" in str(e)
                or "device-side assert triggered" in str(e)
                or tb_clear_frames == "1"):
            type, val, tb = get_ref_free_exc_info()  # must!
            gc.collect()
            if "device-side assert triggered" in str(e):
                warn("""When 'device-side assert triggered' error happens, it's not possible to recover and you must restart the kernel to continue. Use os.environ['CUDA_LAUNCH_BLOCKING']="1" before restarting to debug""")
            raise type(val).with_traceback(tb) from None
        else:
            raise  # re-raises the exact last exception
return wrapper
<SYSTEM_TASK:> Look through the cell source for comments which affect nbval's behaviour <END_TASK> <USER_TASK:> Description: def find_comment_markers(cellsource): """Look through the cell source for comments which affect nbval's behaviour Yield an iterable of ``(MARKER_TYPE, True)``. """
found = {}
for line in cellsource.splitlines():
    line = line.strip()
    if line.startswith('#'):
        comment = line.lstrip('#').strip()
        if comment in comment_markers:
            marker = comment_markers[comment]
            if not isinstance(marker, tuple):
                # If not an explicit tuple ('option', True/False),
                # imply ('option', True)
                marker = (marker, True)
            marker_type = marker[0]
            if marker_type in found:
                warnings.warn(
                    "Conflicting comment markers found, using the latest: "
                    " %s VS %s" % (found[marker_type], comment))
            found[marker_type] = comment
            yield marker
<SYSTEM_TASK:> Merge all stream outputs with shared names into single streams <END_TASK> <USER_TASK:> Description: def coalesce_streams(outputs): """ Merge all stream outputs with shared names into single streams to ensure deterministic outputs. Parameters ---------- outputs : iterable of NotebookNodes Outputs being processed """
if not outputs:
    return outputs

new_outputs = []
streams = {}
for output in outputs:
    if (output.output_type == 'stream'):
        if output.name in streams:
            streams[output.name].text += output.text
        else:
            new_outputs.append(output)
            streams[output.name] = output
    else:
        new_outputs.append(output)

# process \r and \b characters
for output in streams.values():
    old = output.text
    while len(output.text) < len(old):
        old = output.text
        # Cancel out anything-but-newline followed by backspace
        output.text = backspace_pat.sub('', output.text)
    # Replace all carriage returns not followed by newline
    output.text = carriagereturn_pat.sub('', output.text)

return new_outputs
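A hedged sketch using nbformat's output constructor (assuming two stdout stream outputs produced by one cell; the texts are placeholders):
import nbformat.v4 as v4
outs = [v4.new_output('stream', name='stdout', text='first '),
        v4.new_output('stream', name='stdout', text='second')]
merged = coalesce_streams(outs)
print(len(merged), merged[0].text)  # -> 1 first second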
<SYSTEM_TASK:> Makes failure output for streams better by having key be the stream name <END_TASK> <USER_TASK:> Description: def transform_streams_for_comparison(outputs): """Makes failure output for streams better by having key be the stream name"""
new_outputs = []
for output in outputs:
    if (output.output_type == 'stream'):
        # Transform output so the key is the stream name
        new_outputs.append({
            'output_type': 'stream',
            output.name: output.text,
        })
    else:
        new_outputs.append(output)
return new_outputs
<SYSTEM_TASK:> Indent each line with indent <END_TASK> <USER_TASK:> Description: def _indent(s, indent='  '): """Indent each line with indent"""
if isinstance(s, six.string_types):
    return '\n'.join(('%s%s' % (indent, line) for line in s.splitlines()))
return s
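A two-line example of the default two-space indent:
print(_indent('line1\nline2'))
#   line1
#   line2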
<SYSTEM_TASK:> Called by pytest to set up the collector cells. <END_TASK> <USER_TASK:> Description: def setup(self): """ Called by pytest to set up the collector cells. Here we start a kernel and set up the sanitize patterns. """
if self.parent.config.option.current_env:
    kernel_name = CURRENT_ENV_KERNEL_NAME
else:
    kernel_name = self.nb.metadata.get(
        'kernelspec', {}).get('name', 'python')
self.kernel = RunningKernel(kernel_name, str(self.fspath.dirname))
self.setup_sanitize_files()
if getattr(self.parent.config.option, 'cov_source', None):
    setup_coverage(self.parent.config, self.kernel, getattr(self, "fspath", None))
<SYSTEM_TASK:> For each of the sanitize files that were specified as command line options <END_TASK> <USER_TASK:> Description: def setup_sanitize_files(self): """ For each of the sanitize files that were specified as command line options load the contents of the file into the sanitise patterns dictionary. """
for fname in self.get_sanitize_files():
    with open(fname, 'r') as f:
        self.sanitize_patterns.update(get_sanitize_patterns(f.read()))
<SYSTEM_TASK:> Return list of all sanitize files provided by the user on the command line. <END_TASK> <USER_TASK:> Description: def get_sanitize_files(self): """ Return list of all sanitize files provided by the user on the command line. N.B.: We only support one sanitize file at the moment, but this is likely to change in the future """
if self.parent.config.option.sanitize_with is not None:
    return [self.parent.config.option.sanitize_with]
else:
    return []
<SYSTEM_TASK:> Gets a message from the iopub channel of the notebook kernel. <END_TASK> <USER_TASK:> Description: def get_kernel_message(self, timeout=None, stream='iopub'): """ Gets a message from the iopub channel of the notebook kernel. """
return self.kernel.get_message(stream, timeout=timeout)
<SYSTEM_TASK:> The collect function is required by pytest and is used to yield pytest <END_TASK> <USER_TASK:> Description: def collect(self): """ The collect function is required by pytest and is used to yield pytest Item objects. We specify an Item for each code cell in the notebook. """
self.nb = nbformat.read(str(self.fspath), as_version=4)

# Start the cell count
cell_num = 0

# Iterate over the cells in the notebook
for cell in self.nb.cells:
    # Skip the cells that have text, headings or related stuff
    # Only test code cells
    if cell.cell_type == 'code':
        # The cell may contain a comment indicating that its output
        # should be checked or ignored. If it doesn't, use the default
        # behaviour. The --nbval option checks unmarked cells.
        with warnings.catch_warnings(record=True) as ws:
            options = defaultdict(bool, find_metadata_tags(cell.metadata))
            comment_opts = dict(find_comment_markers(cell.source))
            if set(comment_opts.keys()) & set(options.keys()):
                warnings.warn(
                    "Overlapping options from comments and metadata, "
                    "using options from comments: %s"
                    % str(set(comment_opts.keys()) & set(options.keys())))
        for w in ws:
            self.parent.config.warn(
                "C1",
                str(w.message),
                '%s:Cell %d' % (getattr(self, "fspath", None), cell_num))
        options.update(comment_opts)
        options.setdefault('check', self.compare_outputs)
        yield IPyNbCell('Cell ' + str(cell_num), self, cell_num, cell, options)

        # Update 'code' cell count
        cell_num += 1
<SYSTEM_TASK:> Format an output for printing <END_TASK> <USER_TASK:> Description: def format_output_compare(self, key, left, right): """Format an output for printing"""
if isinstance(left, six.string_types):
    left = _trim_base64(left)
if isinstance(right, six.string_types):
    right = _trim_base64(right)
cc = self.colors
self.comparison_traceback.append(
    cc.OKBLUE
    + " mismatch '%s'" % key
    + cc.FAIL)

# Use comparison repr from pytest:
hook_result = self.ihook.pytest_assertrepr_compare(
    config=self.config, op='==', left=left, right=right)
for new_expl in hook_result:
    if new_expl:
        new_expl = ['  %s' % line.replace("\n", "\\n") for line in new_expl]
        self.comparison_traceback.append(
            "\n  assert reference_output == test_output failed:\n")
        self.comparison_traceback.extend(new_expl)
        break
else:
    # Fallback repr:
    self.comparison_traceback.append(
        "  <<<<<<<<<<<< Reference output from ipynb file:"
        + cc.ENDC)
    self.comparison_traceback.append(_indent(left))
    self.comparison_traceback.append(
        cc.FAIL
        + '  ============ disagrees with newly computed (test) output:'
        + cc.ENDC)
    self.comparison_traceback.append(_indent(right))
    self.comparison_traceback.append(
        cc.FAIL
        + '  >>>>>>>>>>>>')
self.comparison_traceback.append(cc.ENDC)
<SYSTEM_TASK:> sanitize a string for comparison. <END_TASK> <USER_TASK:> Description: def sanitize(self, s): """sanitize a string for comparison. """
if not isinstance(s, six.string_types):
    return s

# re.sub matches a regex and replaces it with another.
# The regex replacements are taken from a file if the option
# is passed when py.test is called. Otherwise, the strings
# are not processed.
for regex, replace in six.iteritems(self.parent.sanitize_patterns):
    s = re.sub(regex, replace, s)
return s
<SYSTEM_TASK:> convert iterable object into numpy array <END_TASK> <USER_TASK:> Description: def A(*a): """convert iterable object into numpy array"""
return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]
<SYSTEM_TASK:> Convert numpy array into a pytorch tensor. <END_TASK> <USER_TASK:> Description: def T(a, half=False, cuda=True): """ Convert numpy array into a pytorch tensor. if Cuda is available and USE_GPU=True, store resulting tensor in GPU. """
if not torch.is_tensor(a):
    a = np.array(np.ascontiguousarray(a))
    if a.dtype in (np.int8, np.int16, np.int32, np.int64):
        a = torch.LongTensor(a.astype(np.int64))
    elif a.dtype in (np.float32, np.float64):
        a = to_half(a) if half else torch.FloatTensor(a)
    else: raise NotImplementedError(a.dtype)
if cuda: a = to_gpu(a)
return a
<SYSTEM_TASK:> splits iterable a into equal parts of size sz <END_TASK> <USER_TASK:> Description: def partition(a, sz): """splits iterable a into equal parts of size sz (the last part may be shorter)"""
return [a[i:i+sz] for i in range(0, len(a), sz)]
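For instance, a remainder simply ends up in a shorter final chunk:
partition([1, 2, 3, 4, 5], 2)  # -> [[1, 2], [3, 4], [5]]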
<SYSTEM_TASK:> Method returns an instance of the LayerOptimizer class, which <END_TASK> <USER_TASK:> Description: def get_layer_opt(self, lrs, wds): """Method returns an instance of the LayerOptimizer class, which allows for setting differential learning rates for different parts of the model. An example of how a model maybe differentiated into different parts for application of differential learning rates and weight decays is seen in ../.../courses/dl1/fastai/conv_learner.py, using the dict 'model_meta'. Currently, this seems supported only for convolutional networks such as VGG-19, ResNet-XX etc. Args: lrs (float or list(float)): learning rate(s) for the model wds (float or list(float)): weight decay parameter(s). Returns: An instance of a LayerOptimizer """
return LayerOptimizer(self.opt_fn, self.get_layer_groups(), lrs, wds)
<SYSTEM_TASK:> Helps you find an optimal learning rate for a model. <END_TASK> <USER_TASK:> Description: def lr_find(self, start_lr=1e-5, end_lr=10, wds=None, linear=False, **kwargs): """Helps you find an optimal learning rate for a model. It uses the technique developed in the 2015 paper `Cyclical Learning Rates for Training Neural Networks`, where we simply keep increasing the learning rate from a very small value, until the loss starts decreasing. Args: start_lr (float/numpy array) : Passing in a numpy array allows you to specify learning rates for a learner's layer_groups end_lr (float) : The maximum learning rate to try. wds (iterable/float) Examples: As training moves us closer to the optimal weights for a model, the optimal learning rate will be smaller. We can take advantage of that knowledge and provide lr_find() with a starting learning rate 1000x smaller than the model's current learning rate as such: >> learn.lr_find(lr/1000) >> lrs = np.array([ 1e-4, 1e-3, 1e-2 ]) >> learn.lr_find(lrs / 1000) Notes: lr_find() may finish before going through each batch of examples if the loss decreases enough. .. _Cyclical Learning Rates for Training Neural Networks: http://arxiv.org/abs/1506.01186 """
self.save('tmp')
layer_opt = self.get_layer_opt(start_lr, wds)
self.sched = LR_Finder(layer_opt, len(self.data.trn_dl), end_lr, linear=linear)
self.fit_gen(self.model, self.data, layer_opt, 1, **kwargs)
self.load('tmp')
<SYSTEM_TASK:> Compute the means and stds of `self.cont_names` columns to normalize them. <END_TASK> <USER_TASK:> Description: def apply_train(self, df:DataFrame): "Compute the means and stds of `self.cont_names` columns to normalize them." self.means,self.stds = {},{} for n in self.cont_names: assert is_numeric_dtype(df[n]), (f"""Cannot normalize '{n}' column as it isn't numerical. Are you sure it doesn't belong in the categorical set of columns?"""
)
    self.means[n],self.stds[n] = df[n].mean(),df[n].std()
    df[n] = (df[n]-self.means[n]) / (1e-7 + self.stds[n])
<SYSTEM_TASK:> Load the classifier and int to string mapping <END_TASK> <USER_TASK:> Description: def load_model(itos_filename, classifier_filename, num_classes): """Load the classifier and int to string mapping Args: itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl) classifier_filename (str): The filename of the trained classifier Returns: string to int mapping, trained classifer model """
# load the int to string mapping file
itos = pickle.load(Path(itos_filename).open('rb'))
# turn it into a string to int mapping (which is what we need)
stoi = collections.defaultdict(lambda:0, {str(v):int(k) for k,v in enumerate(itos)})

# these parameters aren't used, but this is the easiest way to get a model
bptt,em_sz,nh,nl = 70,400,1150,3
dps = np.array([0.4,0.5,0.05,0.3,0.4])*0.5
vs = len(itos)

model = get_rnn_classifer(bptt, 20*70, num_classes, vs, emb_sz=em_sz, n_hid=nh,
                          n_layers=nl, pad_token=1, layers=[em_sz*3, 50, num_classes],
                          drops=[dps[4], 0.1], dropouti=dps[0], wdrop=dps[1],
                          dropoute=dps[2], dropouth=dps[3])

# load the trained classifier
model.load_state_dict(torch.load(classifier_filename, map_location=lambda storage, loc: storage))

# put the classifier into evaluation mode
model.reset()
model.eval()

return stoi, model
<SYSTEM_TASK:> Do the actual prediction on the text using the <END_TASK> <USER_TASK:> Description: def predict_text(stoi, model, text): """Do the actual prediction on the text using the model and mapping files passed """
# prefix text with tokens:
#   xbos: beginning of sentence
#   xfld 1: we are using a single field here
input_str = 'xbos xfld 1 ' + text

# predictions are done on arrays of input.
# We only have a single input, so turn it into a 1x1 array
texts = [input_str]

# tokenize using the fastai wrapper around spacy
tok = Tokenizer().proc_all_mp(partition_by_cores(texts))

# turn into integers for each word
encoded = [stoi[p] for p in tok[0]]

# we want a [x,1] array where x is the number
# of words inputted (including the prefix tokens)
ary = np.reshape(np.array(encoded),(-1,1))

# turn this array into a tensor
tensor = torch.from_numpy(ary)

# wrap in a torch Variable
variable = Variable(tensor)

# do the predictions
predictions = model(variable)

# convert back to numpy
numpy_preds = predictions[0].data.numpy()

return softmax(numpy_preds[0])[0]
<SYSTEM_TASK:> Makes a W3C alwaysMatch capabilities object. <END_TASK> <USER_TASK:> Description: def _make_w3c_caps(caps): """Makes a W3C alwaysMatch capabilities object. Filters out capability names that are not in the W3C spec. Spec-compliant drivers will reject requests containing unknown capability names. Moves the Firefox profile, if present, from the old location to the new Firefox options object. :Args: - caps - A dictionary of capabilities requested by the caller. """
caps = copy.deepcopy(caps)
profile = caps.get('firefox_profile')
always_match = {}
if caps.get('proxy') and caps['proxy'].get('proxyType'):
    caps['proxy']['proxyType'] = caps['proxy']['proxyType'].lower()
for k, v in caps.items():
    if v and k in _OSS_W3C_CONVERSION:
        always_match[_OSS_W3C_CONVERSION[k]] = v.lower() if k == 'platform' else v
    if k in _W3C_CAPABILITY_NAMES or ':' in k:
        always_match[k] = v
if profile:
    moz_opts = always_match.get('moz:firefoxOptions', {})
    # If it's already present, assume the caller did that intentionally.
    if 'profile' not in moz_opts:
        # Don't mutate the original capabilities.
        new_opts = copy.deepcopy(moz_opts)
        new_opts['profile'] = profile
        always_match['moz:firefoxOptions'] = new_opts
return {"firstMatch": [{}], "alwaysMatch": always_match}
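A hedged sketch of the filtering; the exact contents of `_W3C_CAPABILITY_NAMES` and `_OSS_W3C_CONVERSION` are assumed from Selenium 3.x, where for example `platform` maps to a lower-cased `platformName` and unknown names without a colon are dropped:
caps = {'browserName': 'firefox', 'platform': 'LINUX', 'nonStandardCap': 'x'}
print(_make_w3c_caps(caps))
# -> {'firstMatch': [{}],
#     'alwaysMatch': {'browserName': 'firefox', 'platformName': 'linux'}}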
<SYSTEM_TASK:> Creates a new session with the desired capabilities. <END_TASK> <USER_TASK:> Description: def start_session(self, capabilities, browser_profile=None): """ Creates a new session with the desired capabilities. :Args: - browser_name - The name of the browser to request. - version - Which browser version to request. - platform - Which platform to request the browser on. - javascript_enabled - Whether the new session should support JavaScript. - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested. """
if not isinstance(capabilities, dict):
    raise InvalidArgumentException("Capabilities must be a dictionary")
if browser_profile:
    if "moz:firefoxOptions" in capabilities:
        capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded
    else:
        capabilities.update({'firefox_profile': browser_profile.encoded})
w3c_caps = _make_w3c_caps(capabilities)
parameters = {"capabilities": w3c_caps,
              "desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
    response = response['value']
self.session_id = response['sessionId']
self.capabilities = response.get('value')

# if capabilities is none we are probably speaking to
# a W3C endpoint
if self.capabilities is None:
    self.capabilities = response.get('capabilities')

# Double check to see if we have a W3C Compliant browser
self.w3c = response.get('status') is None
self.command_executor.w3c = self.w3c
<SYSTEM_TASK:> Creates a web element with the specified `element_id`. <END_TASK> <USER_TASK:> Description: def create_web_element(self, element_id): """Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id, w3c=self.w3c)
<SYSTEM_TASK:> Sends a command to be executed by a command.CommandExecutor. <END_TASK> <USER_TASK:> Description: def execute(self, driver_command, params=None): """ Sends a command to be executed by a command.CommandExecutor. :Args: - driver_command: The name of the command to execute as a string. - params: A dictionary of named parameters to send with the command. :Returns: The command's JSON response loaded into a dictionary object. """
if self.session_id is not None:
    if not params:
        params = {'sessionId': self.session_id}
    elif 'sessionId' not in params:
        params['sessionId'] = self.session_id

params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
    self.error_handler.check_response(response)
    response['value'] = self._unwrap_value(
        response.get('value', None))
    return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
<SYSTEM_TASK:> Finds an element by link text. <END_TASK> <USER_TASK:> Description: def find_element_by_link_text(self, link_text): """ Finds an element by link text. :Args: - link_text: The text of the element to be found. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_link_text('Sign In') """
return self.find_element(by=By.LINK_TEXT, value=link_text)
<SYSTEM_TASK:> Finds elements by link text. <END_TASK> <USER_TASK:> Description: def find_elements_by_link_text(self, text): """ Finds elements by link text. :Args: - link_text: The text of the elements to be found. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: :: elements = driver.find_elements_by_link_text('Sign In') """
return self.find_elements(by=By.LINK_TEXT, value=text)
<SYSTEM_TASK:> Finds an element by a partial match of its link text. <END_TASK> <USER_TASK:> Description: def find_element_by_partial_link_text(self, link_text): """ Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_partial_link_text('Sign') """
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
<SYSTEM_TASK:> Finds elements by a partial match of their link text. <END_TASK> <USER_TASK:> Description: def find_elements_by_partial_link_text(self, link_text): """ Finds elements by a partial match of their link text. :Args: - link_text: The text of the element to partial match on. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: :: elements = driver.find_elements_by_partial_link_text('Sign') """
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
<SYSTEM_TASK:> Finds elements by name. <END_TASK> <USER_TASK:> Description: def find_elements_by_name(self, name): """ Finds elements by name. :Args: - name: The name of the elements to find. :Returns: - list of webelement - a list with elements if any was found. an empty list if not :Usage: :: elements = driver.find_elements_by_name('foo') """
return self.find_elements(by=By.NAME, value=name)
<SYSTEM_TASK:> Finds an element by tag name. <END_TASK> <USER_TASK:> Description: def find_element_by_tag_name(self, name): """ Finds an element by tag name. :Args: - name - name of html tag (eg: h1, a, span) :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_tag_name('h1') """
return self.find_element(by=By.TAG_NAME, value=name)
<SYSTEM_TASK:> Finds elements by tag name. <END_TASK> <USER_TASK:> Description: def find_elements_by_tag_name(self, name): """ Finds elements by tag name. :Args: - name - name of html tag (eg: h1, a, span) :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_tag_name('h1') """
return self.find_elements(by=By.TAG_NAME, value=name)
<SYSTEM_TASK:> Finds an element by class name. <END_TASK> <USER_TASK:> Description: def find_element_by_class_name(self, name): """ Finds an element by class name. :Args: - name: The class name of the element to find. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_class_name('foo') """
return self.find_element(by=By.CLASS_NAME, value=name)
<SYSTEM_TASK:> Finds elements by class name. <END_TASK> <USER_TASK:> Description: def find_elements_by_class_name(self, name): """ Finds elements by class name. :Args: - name: The class name of the elements to find. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_class_name('foo') """
return self.find_elements(by=By.CLASS_NAME, value=name)
<SYSTEM_TASK:> Finds an element by css selector. <END_TASK> <USER_TASK:> Description: def find_element_by_css_selector(self, css_selector): """ Finds an element by css selector. :Args: - css_selector - CSS selector string, ex: 'a.nav#home' :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_css_selector('#foo') """
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
<SYSTEM_TASK:> Finds elements by css selector. <END_TASK> <USER_TASK:> Description: def find_elements_by_css_selector(self, css_selector): """ Finds elements by css selector. :Args: - css_selector - CSS selector string, ex: 'a.nav#home' :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_css_selector('.foo') """
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
<SYSTEM_TASK:> Quits the driver and closes every associated window. <END_TASK> <USER_TASK:> Description: def quit(self): """ Quits the driver and closes every associated window. :Usage: :: driver.quit() """
try:
    self.execute(Command.QUIT)
finally:
    self.stop_client()
    self.command_executor.close()
<SYSTEM_TASK:> Returns the handle of the current window. <END_TASK> <USER_TASK:> Description: def current_window_handle(self): """ Returns the handle of the current window. :Usage: :: driver.current_window_handle """
if self.w3c:
    return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
else:
    return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
<SYSTEM_TASK:> Returns the handles of all windows within the current session. <END_TASK> <USER_TASK:> Description: def window_handles(self): """ Returns the handles of all windows within the current session. :Usage: :: driver.window_handles """
if self.w3c: return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value'] else: return self.execute(Command.GET_WINDOW_HANDLES)['value']
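Note: the two handle properties above are typically used together when a click opens a new window or tab. A minimal sketch, not from the source, assuming an active `driver` session:

original = driver.current_window_handle
for handle in driver.window_handles:
    if handle != original:
        # Focus the newly opened window; switch back to `original` when done.
        driver.switch_to.window(handle)
        break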
<SYSTEM_TASK:> Maximizes the current window that webdriver is using. <END_TASK> <USER_TASK:> Description: def maximize_window(self): """ Maximizes the current window that webdriver is using. """
params = None command = Command.W3C_MAXIMIZE_WINDOW if not self.w3c: command = Command.MAXIMIZE_WINDOW params = {'windowHandle': 'current'} self.execute(command, params)
<SYSTEM_TASK:> Get a single cookie by name. Returns the cookie if found, None if not. <END_TASK> <USER_TASK:> Description: def get_cookie(self, name): """ Get a single cookie by name. Returns the cookie if found, None if not. :Usage: :: driver.get_cookie('my_cookie') """
if self.w3c: try: return self.execute(Command.GET_COOKIE, {'name': name})['value'] except NoSuchCookieException: return None else: cookies = self.get_cookies() for cookie in cookies: if cookie['name'] == name: return cookie return None
<SYSTEM_TASK:> Sets a sticky timeout to implicitly wait for an element to be found, <END_TASK> <USER_TASK:> Description: def implicitly_wait(self, time_to_wait): """ Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete. This method only needs to be called one time per session. To set the timeout for calls to execute_async_script, see set_script_timeout. :Args: - time_to_wait: Amount of time to wait (in seconds) :Usage: :: driver.implicitly_wait(30) """
if self.w3c: self.execute(Command.SET_TIMEOUTS, { 'implicit': int(float(time_to_wait) * 1000)}) else: self.execute(Command.IMPLICIT_WAIT, { 'ms': float(time_to_wait) * 1000})
<SYSTEM_TASK:> Set the amount of time that the script should wait during an <END_TASK> <USER_TASK:> Description: def set_script_timeout(self, time_to_wait): """ Set the amount of time that the script should wait during an execute_async_script call before throwing an error. :Args: - time_to_wait: The amount of time to wait (in seconds) :Usage: :: driver.set_script_timeout(30) """
if self.w3c: self.execute(Command.SET_TIMEOUTS, { 'script': int(float(time_to_wait) * 1000)}) else: self.execute(Command.SET_SCRIPT_TIMEOUT, { 'ms': float(time_to_wait) * 1000})
<SYSTEM_TASK:> Set the amount of time to wait for a page load to complete <END_TASK> <USER_TASK:> Description: def set_page_load_timeout(self, time_to_wait): """ Set the amount of time to wait for a page load to complete before throwing an error. :Args: - time_to_wait: The amount of time to wait (in seconds) :Usage: :: driver.set_page_load_timeout(30) """
try: self.execute(Command.SET_TIMEOUTS, { 'pageLoad': int(float(time_to_wait) * 1000)}) except WebDriverException: self.execute(Command.SET_TIMEOUTS, { 'ms': float(time_to_wait) * 1000, 'type': 'page load'})
<SYSTEM_TASK:> Saves a screenshot of the current window to a PNG image file. Returns <END_TASK> <USER_TASK:> Description: def get_screenshot_as_file(self, filename): """ Saves a screenshot of the current window to a PNG image file. Returns False if there is any IOError, else returns True. Use full paths in your filename. :Args: - filename: The full path you wish to save your screenshot to. This should end with a `.png` extension. :Usage: :: driver.get_screenshot_as_file('/Screenshots/foo.png') """
if not filename.lower().endswith('.png'): warnings.warn("name used for saved screenshot does not match file " "type. It should end with a `.png` extension", UserWarning) png = self.get_screenshot_as_png() try: with open(filename, 'wb') as f: f.write(png) except IOError: return False finally: del png return True
<SYSTEM_TASK:> Gets the width and height of the current window. <END_TASK> <USER_TASK:> Description: def get_window_size(self, windowHandle='current'): """ Gets the width and height of the current window. :Usage: :: driver.get_window_size() """
command = Command.GET_WINDOW_SIZE if self.w3c: if windowHandle != 'current': warnings.warn("Only 'current' window is supported for W3C compatible browsers.") size = self.get_window_rect() else: size = self.execute(command, {'windowHandle': windowHandle}) if size.get('value', None) is not None: size = size['value'] return {k: size[k] for k in ('width', 'height')}
<SYSTEM_TASK:> Gets the x,y position of the current window. <END_TASK> <USER_TASK:> Description: def get_window_position(self, windowHandle='current'): """ Gets the x,y position of the current window. :Usage: :: driver.get_window_position() """
if self.w3c: if windowHandle != 'current': warnings.warn("Only 'current' window is supported for W3C compatible browsers.") position = self.get_window_rect() else: position = self.execute(Command.GET_WINDOW_POSITION, {'windowHandle': windowHandle})['value'] return {k: position[k] for k in ('x', 'y')}
<SYSTEM_TASK:> Sets the x, y coordinates of the window as well as height and width of <END_TASK> <USER_TASK:> Description: def set_window_rect(self, x=None, y=None, width=None, height=None): """ Sets the x, y coordinates of the window as well as height and width of the current window. This method is only supported for W3C compatible browsers; other browsers should use `set_window_position` and `set_window_size`. :Usage: :: driver.set_window_rect(x=10, y=10) driver.set_window_rect(width=100, height=200) driver.set_window_rect(x=10, y=10, width=100, height=200) """
if not self.w3c: raise UnknownMethodException("set_window_rect is only supported for W3C compatible browsers") if (x is None and y is None) and (height is None and width is None): raise InvalidArgumentException("x and y or height and width need values") return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y, "width": width, "height": height})['value']
<SYSTEM_TASK:> Set the file detector to be used when sending keyboard input. <END_TASK> <USER_TASK:> Description: def file_detector(self, detector): """ Set the file detector to be used when sending keyboard input. By default, this is set to a file detector that does nothing. see FileDetector see LocalFileDetector see UselessFileDetector :Args: - detector: The detector to use. Must not be None. """
if detector is None: raise WebDriverException("You may not set a file detector that is null") if not isinstance(detector, FileDetector): raise WebDriverException("Detector has to be instance of FileDetector") self._file_detector = detector
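Note: a hedged illustration of the setter above for a Remote session. LocalFileDetector makes send_keys() upload a local file to the remote end before setting the input's value; the element locator and file path below are hypothetical:

from selenium.webdriver.remote.file_detector import LocalFileDetector

driver.file_detector = LocalFileDetector()
# send_keys() now transfers the local file to the remote machine first.
driver.find_element_by_css_selector('input[type="file"]').send_keys('/tmp/upload.txt')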
<SYSTEM_TASK:> Sets the current orientation of the device <END_TASK> <USER_TASK:> Description: def orientation(self, value): """ Sets the current orientation of the device :Args: - value: orientation to set it to. :Usage: :: driver.orientation = 'landscape' """
allowed_values = ['LANDSCAPE', 'PORTRAIT'] if value.upper() in allowed_values: self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value}) else: raise WebDriverException("You can only set the orientation to 'LANDSCAPE' or 'PORTRAIT'")
<SYSTEM_TASK:> Calls the method provided with the driver as an argument until the \ <END_TASK> <USER_TASK:> Description: def until(self, method, message=''): """Calls the method provided with the driver as an argument until the \ return value does not evaluate to ``False``. :param method: callable(WebDriver) :param message: optional message for :exc:`TimeoutException` :returns: the result of the last call to `method` :raises: :exc:`selenium.common.exceptions.TimeoutException` if timeout occurs """
screen = None stacktrace = None end_time = time.time() + self._timeout while True: try: value = method(self._driver) if value: return value except self._ignored_exceptions as exc: screen = getattr(exc, 'screen', None) stacktrace = getattr(exc, 'stacktrace', None) time.sleep(self._poll) if time.time() > end_time: break raise TimeoutException(message, screen, stacktrace)
<SYSTEM_TASK:> Calls the method provided with the driver as an argument until the \ <END_TASK> <USER_TASK:> Description: def until_not(self, method, message=''): """Calls the method provided with the driver as an argument until the \ return value evaluates to ``False``. :param method: callable(WebDriver) :param message: optional message for :exc:`TimeoutException` :returns: the result of the last call to `method`, or ``True`` if `method` has raised one of the ignored exceptions :raises: :exc:`selenium.common.exceptions.TimeoutException` if timeout occurs """
end_time = time.time() + self._timeout while True: try: value = method(self._driver) if not value: return value except self._ignored_exceptions: return True time.sleep(self._poll) if time.time() > end_time: break raise TimeoutException(message)
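Note: a minimal sketch of both polling loops in practice; the `driver`, locator, and title text are assumptions, not from the source:

from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

wait = WebDriverWait(driver, timeout=10, poll_frequency=0.5,
                     ignored_exceptions=(NoSuchElementException,))
# until() returns the element once found, or raises TimeoutException after 10s.
element = wait.until(EC.presence_of_element_located((By.ID, 'content')))
# until_not() returns as soon as the condition stops holding.
wait.until_not(EC.title_contains('Loading'))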
<SYSTEM_TASK:> Quits the driver and closes every associated window. <END_TASK> <USER_TASK:> Description: def quit(self): """Quits the driver and closes every associated window."""
try:
    RemoteWebDriver.quit(self)
except Exception:
    # Something has probably gone wrong by this point, so the error
    # message is not actionable; ignore it and continue cleaning up.
    pass
if self.w3c:
    self.service.stop()
else:
    self.binary.kill()
if self.profile is not None:
    try:
        shutil.rmtree(self.profile.path)
        if self.profile.tempfolder is not None:
            shutil.rmtree(self.profile.tempfolder)
    except Exception as e:
        print(str(e))
<SYSTEM_TASK:> Sets the context that Selenium commands are running in using <END_TASK> <USER_TASK:> Description: def context(self, context): """Sets the context that Selenium commands are running in using a `with` statement. The state of the context on the server is saved before entering the block, and restored upon exiting it. :param context: Context, may be one of the class properties `CONTEXT_CHROME` or `CONTEXT_CONTENT`. Usage example:: with selenium.context(selenium.CONTEXT_CHROME): # chrome scope ... do stuff ... """
initial_context = self.execute('GET_CONTEXT').pop('value') self.set_context(context) try: yield finally: self.set_context(initial_context)
<SYSTEM_TASK:> Installs a Firefox addon. <END_TASK> <USER_TASK:> Description: def install_addon(self, path, temporary=None): """ Installs a Firefox addon. Returns the identifier of the installed addon. This identifier can later be used to uninstall the addon. :param path: Absolute path to the addon that will be installed. :Usage: :: driver.install_addon('/path/to/firebug.xpi') """
payload = {"path": path} if temporary is not None: payload["temporary"] = temporary return self.execute("INSTALL_ADDON", payload)["value"]
<SYSTEM_TASK:> Sets location of the browser binary, either by string or <END_TASK> <USER_TASK:> Description: def binary(self, new_binary): """Sets location of the browser binary, either by string or ``FirefoxBinary`` instance. """
if not isinstance(new_binary, FirefoxBinary): new_binary = FirefoxBinary(new_binary) self._binary = new_binary
<SYSTEM_TASK:> Sets location of the browser profile to use, either by string <END_TASK> <USER_TASK:> Description: def profile(self, new_profile): """Sets location of the browser profile to use, either by string or ``FirefoxProfile``. """
if not isinstance(new_profile, FirefoxProfile): new_profile = FirefoxProfile(new_profile) self._profile = new_profile
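Note: both setters above are usually exercised indirectly through the Firefox constructor, which accepts either plain strings or the wrapper instances; the paths below are illustrative only:

from selenium.webdriver import Firefox
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile

driver = Firefox(firefox_binary=FirefoxBinary('/usr/local/bin/firefox'),
                 firefox_profile=FirefoxProfile('/path/to/existing/profile'))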
<SYSTEM_TASK:> Set the network connection for the remote device. <END_TASK> <USER_TASK:> Description: def set_network_connection(self, network): """ Set the network connection for the remote device. Example of setting airplane mode:: driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE) """
mode = network.mask if isinstance(network, self.ConnectionType) else network return self.ConnectionType(self._driver.execute( Command.SET_NETWORK_CONNECTION, { 'name': 'network_connection', 'parameters': {'type': mode}})['value'])
<SYSTEM_TASK:> Unzip zipfile to a temporary directory. <END_TASK> <USER_TASK:> Description: def unzip_to_temp_dir(zip_file_name): """Unzip zipfile to a temporary directory. The directory of the unzipped files is returned if success, otherwise None is returned. """
if not zip_file_name or not os.path.exists(zip_file_name):
    return None
zf = zipfile.ZipFile(zip_file_name)
if zf.testzip() is not None:
    return None
# Unzip the files into a temporary directory
LOGGER.info("Extracting zipped file: %s" % zip_file_name)
tempdir = tempfile.mkdtemp()
try:
    # Create directories that don't exist
    for zip_name in zf.namelist():
        # We have no knowledge of the OS where the zip file was
        # created, so normalize both "\" and "/" to the local
        # path separator.
        name = (zip_name.replace("\\", os.path.sep).
                replace("/", os.path.sep))
        dest = os.path.join(tempdir, name)
        if (name.endswith(os.path.sep) and not os.path.exists(dest)):
            os.mkdir(dest)
            LOGGER.debug("Directory %s created." % dest)
    # Copy files
    for zip_name in zf.namelist():
        name = (zip_name.replace("\\", os.path.sep).
                replace("/", os.path.sep))
        dest = os.path.join(tempdir, name)
        if not name.endswith(os.path.sep):
            LOGGER.debug("Copying file %s......" % dest)
            with open(dest, 'wb') as outfile:
                outfile.write(zf.read(zip_name))
            LOGGER.debug("File %s copied." % dest)
    LOGGER.info("Unzipped file can be found at %s" % tempdir)
    return tempdir
except IOError as err:
    LOGGER.error("Error in extracting %s: %s" % (zip_file_name, err))
    return None
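Note: a short usage sketch for the helper above; the archive name is hypothetical, and cleanup is the caller's responsibility since the helper never removes the directory it creates:

import shutil

extracted = unzip_to_temp_dir('webdriver.xpi')
if extracted is not None:
    try:
        pass  # work with the extracted files here
    finally:
        shutil.rmtree(extracted)  # remove the temporary directory when done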
<SYSTEM_TASK:> Touch down at given coordinates. <END_TASK> <USER_TASK:> Description: def tap_and_hold(self, xcoord, ycoord): """ Touch down at given coordinates. :Args: - xcoord: X Coordinate to touch down. - ycoord: Y Coordinate to touch down. """
self._actions.append(lambda: self._driver.execute( Command.TOUCH_DOWN, { 'x': int(xcoord), 'y': int(ycoord)})) return self
<SYSTEM_TASK:> Move held tap to specified location. <END_TASK> <USER_TASK:> Description: def move(self, xcoord, ycoord): """ Move held tap to specified location. :Args: - xcoord: X Coordinate to move. - ycoord: Y Coordinate to move. """
self._actions.append(lambda: self._driver.execute( Command.TOUCH_MOVE, { 'x': int(xcoord), 'y': int(ycoord)})) return self
<SYSTEM_TASK:> Release a previously issued 'tap and hold' command at the specified location. <END_TASK> <USER_TASK:> Description: def release(self, xcoord, ycoord): """ Release a previously issued 'tap and hold' command at the specified location. :Args: - xcoord: X Coordinate to release. - ycoord: Y Coordinate to release. """
self._actions.append(lambda: self._driver.execute( Command.TOUCH_UP, { 'x': int(xcoord), 'y': int(ycoord)})) return self
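Note: the three primitives above only queue commands; perform() flushes the queue. A minimal chained sketch with illustrative coordinates, assuming a touch-enabled `driver`:

from selenium.webdriver.common.touch_actions import TouchActions

TouchActions(driver) \
    .tap_and_hold(100, 400) \
    .move(100, 100) \
    .release(100, 100) \
    .perform()  # executes the queued touch commands in order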