docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Create a User Group Args: name (str): A name for the User Group. Must be unique among User Groups. e.g. 'My Test Team'
def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Create a User Group.

    Args:
        name (str): A name for the User Group. Must be unique among
            User Groups. e.g. 'My Test Team'
    """
    self._validate_xoxp_token()
    payload = dict(kwargs, name=name)
    return self.api_call("usergroups.create", json=payload)
145,234
Disable an existing User Group Args: usergroup (str): The encoded ID of the User Group to disable. e.g. 'S0604QSJC'
def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """Disable an existing User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to disable.
            e.g. 'S0604QSJC'
    """
    self._validate_xoxp_token()
    payload = dict(kwargs, usergroup=usergroup)
    return self.api_call("usergroups.disable", json=payload)
145,235
List all users in a User Group Args: usergroup (str): The encoded ID of the User Group to update. e.g. 'S0604QSJC'
def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """List all users in a User Group.

    Args:
        usergroup (str): The encoded ID of the User Group.
            e.g. 'S0604QSJC'
    """
    self._validate_xoxp_token()
    query = dict(kwargs, usergroup=usergroup)
    return self.api_call("usergroups.users.list", http_verb="GET", params=query)
145,237
Update the list of users for a User Group Args: usergroup (str): The encoded ID of the User Group to update. e.g. 'S0604QSJC' users (list): A list of user IDs that represent the entire list of users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
def usergroups_users_update(
    self, *, usergroup: str, users: List[str], **kwargs
) -> SlackResponse:
    """Update the list of users for a User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to update.
            e.g. 'S0604QSJC'
        users (list): A list of user IDs representing the entire list of
            users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
    """
    self._validate_xoxp_token()
    payload = dict(kwargs, usergroup=usergroup, users=users)
    return self.api_call("usergroups.users.update", json=payload)
145,238
Gets user presence information. Args: user (str): User to get presence info on. Defaults to the authed user. e.g. 'W1234567890'
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
    """Get user presence information.

    Args:
        user (str): User to get presence info on. Defaults to the
            authed user. e.g. 'W1234567890'
    """
    query = dict(kwargs, user=user)
    return self.api_call("users.getPresence", http_verb="GET", params=query)
145,240
Find a user with an email address. Args: email (str): An email address belonging to a user in the workspace. e.g. '[email protected]'
def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:
    """Find a user with an email address.

    Args:
        email (str): An email address belonging to a user in the
            workspace. e.g. 'spengler@ghostbusters.example.com'
    """
    query = dict(kwargs, email=email)
    return self.api_call("users.lookupByEmail", http_verb="GET", params=query)
145,242
Set the user profile photo Args: image (str): Supply the path of the image you'd like to upload. e.g. 'myimage.png'
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:
    """Set the user profile photo.

    Args:
        image: Path of (or open file object for) the image to upload.
            e.g. 'myimage.png'
    """
    self._validate_xoxp_token()
    upload = {"image": image}
    return self.api_call("users.setPhoto", files=upload, data=kwargs)
145,243
Manually sets user presence. Args: presence (str): Either 'auto' or 'away'.
def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:
    """Manually set user presence.

    Args:
        presence (str): Either 'auto' or 'away'.
    """
    payload = dict(kwargs, presence=presence)
    return self.api_call("users.setPresence", json=payload)
145,244
Sets the location of the output from the static analysis tool. Parameters: location: str Filesystem location for the results.
def analysis_output(self, location: Optional[str] = None) -> None:
    """Set the location of the output from the static analysis tool.

    Args:
        location: Filesystem location for the results. When omitted,
            the user is prompted interactively for a path.

    Raises:
        UserError: when the results cannot be loaded from *location*.
    """
    try:
        if not location:
            # Interactive prompt with filesystem path completion.
            location = self.prompt(
                "Analysis results: ",
                history_key="analysis_results",
                completer=PathCompleter(),
            )
        self.current_analysis_output = AnalysisOutput.from_str(location)
    except AnalysisOutputError as e:
        raise UserError(f"Error loading results: {e}")
145,496
Select an issue. Parameters: issue_instance_id: int id of the issue instance to select Note: We are selecting issue instances, even though the command is called issue.
def issue(self, issue_instance_id):
    """Select an issue.

    Parameters:
        issue_instance_id: int  id of the issue instance to select

    Note: We are selecting issue instances, even though the command is
    called issue.
    """
    with self.db.make_session() as session:
        # .scalar() returns the single row or None.
        selected_issue = (
            session.query(IssueInstance)
            .filter(IssueInstance.id == issue_instance_id)
            .scalar()
        )
        if selected_issue is None:
            self.warning(
                f"Issue {issue_instance_id} doesn't exist. "
                "Type 'issues' for available issues."
            )
            return
        # Cache the source/sink leaves for this instance while the
        # session is open.
        self.sources = self._get_leaves_issue_instance(
            session, issue_instance_id, SharedTextKind.SOURCE
        )
        self.sinks = self._get_leaves_issue_instance(
            session, issue_instance_id, SharedTextKind.SINK
        )
        self.current_issue_instance_id = int(selected_issue.id)
        # -1 marks "no frame selected"; index 1 is the first frame
        # after the source.
        self.current_frame_id = -1
        self.current_trace_frame_index = 1  # first one after the source
        print(f"Set issue to {issue_instance_id}.")
        # Follow the selected issue into its run if it differs.
        if int(selected_issue.run_id) != self.current_run_id:
            self.current_run_id = int(selected_issue.run_id)
            print(f"Set run to {self.current_run_id}.")
        print()
        self._generate_trace_from_issue()
        self.show()
145,498
Jump to a specific trace frame in a trace. Parameters: selected_number: int the trace frame number from trace output
def jump(self, selected_number: int) -> None:
    """Jump to a specific (1-based) trace frame and redisplay the trace.

    Parameters:
        selected_number: the trace frame number from trace output.

    Raises:
        UserError: if the number is outside 1..len(trace_tuples).
    """
    self._verify_entrypoint_selected()
    upper = len(self.trace_tuples)
    if not 1 <= selected_number <= upper:
        raise UserError(
            "Trace frame number out of bounds "
            f"(expected 1-{upper} but got {selected_number})."
        )
    self.current_trace_frame_index = selected_number - 1
    self.trace()
145,508
Show source code around the current trace frame location. Parameters: context: int number of lines to show above and below trace location (default: 5)
def list_source_code(self, context: int = 5) -> None:
    """Show source code around the current trace frame location.

    Parameters:
        context: number of lines to show above and below the trace
            location (default: 5).
    """
    self._verify_entrypoint_selected()
    current_trace_frame = self.trace_tuples[
        self.current_trace_frame_index
    ].trace_frame

    filename = os.path.join(
        self.repository_directory, current_trace_frame.filename
    )
    file_lines: List[str] = []
    try:
        # Use readlines instead of enumerate(file) because mock_open
        # doesn't support __iter__ until python 3.7.1.
        with open(filename, "r") as file:
            file_lines = file.readlines()
    except FileNotFoundError:
        # Bug fix: message was f"Couldn't open (unknown)." — an f-string
        # with no placeholder; report the actual file that failed.
        self.warning(f"Couldn't open {filename}.")
        return

    self._output_file_lines(current_trace_frame, file_lines, context)
145,511
Download a trained weights, config and preprocessor. Args: url (str): target url.
def download(url):
    """Download a trained weights, config and preprocessor archive.

    Args:
        url (str): target url.

    Returns:
        tuple: paths to the weights, params and preprocessor files.
    """
    archive = get_file(fname='tmp.zip', origin=url, extract=True)
    directory = os.path.dirname(archive)
    weights_file, params_file, preprocessor_file = (
        os.path.join(directory, name)
        for name in ('weights.h5', 'params.json', 'preprocessor.pickle')
    )
    return weights_file, params_file, preprocessor_file
145,768
Loads word vectors in numpy array. Args: embeddings (dict): a dictionary of numpy array. vocab (dict): word_index lookup table. Returns: numpy array: an array of word embeddings.
def filter_embeddings(embeddings, vocab, dim):
    """Restrict pretrained word vectors to the given vocabulary.

    Args:
        embeddings (dict): word -> numpy-vector mapping.
        vocab (dict): word -> index lookup table.
        dim (int): embedding dimensionality.

    Returns:
        numpy array of shape (len(vocab), dim); rows for words missing
        from *embeddings* stay zero. Returns None when *embeddings* is
        not a dict.
    """
    if not isinstance(embeddings, dict):
        return
    filtered = np.zeros([len(vocab), dim])
    for word, idx in vocab.items():
        if word in embeddings:
            filtered[idx] = embeddings[word]
    return filtered
145,770
Loads GloVe vectors in numpy array. Args: file (str): a path to a glove file. Return: dict: a dict of numpy arrays.
def load_glove(file):
    """Load GloVe vectors into a dict of numpy arrays.

    Args:
        file (str): a path to a glove file.

    Returns:
        dict: word -> numpy vector.
    """
    model = {}
    with open(file, encoding="utf8", errors='ignore') as f:
        for row in f:
            parts = row.split(' ')
            model[parts[0]] = np.array([float(v) for v in parts[1:]])
    return model
145,771
Create a Vocabulary object. Args: max_size: The maximum size of the vocabulary, or None for no maximum. Default: None. lower: boolean. Whether to convert the texts to lowercase. unk_token: boolean. Whether to add unknown token. specials: The list of special tokens (e.g., padding or eos) that will be prepended to the vocabulary. Default: ('<pad>',)
def __init__(self, max_size=None, lower=True, unk_token=True, specials=('<pad>',)):
    """Create a Vocabulary object.

    Args:
        max_size: maximum vocabulary size, or None for no maximum.
        lower: whether to lowercase processed tokens.
        unk_token: whether to reserve an unknown token.
        specials: special tokens (e.g. padding or eos) prepended to
            the vocabulary.
    """
    self._max_size = max_size
    self._lower = lower
    self._unk = unk_token
    self._token2id = dict(zip(specials, range(len(specials))))
    self._id2token = list(specials)
    self._token_count = Counter()
145,774
Add token to vocabulary. Args: token (str): token to add.
def add_token(self, token):
    """Add a single token to the vocabulary counts.

    Args:
        token (str): token to add.
    """
    processed = self.process_token(token)
    self._token_count.update((processed,))
145,775
Update dictionary from a collection of documents. Each document is a list of tokens. Args: docs (list): documents to add.
def add_documents(self, docs):
    """Update token counts from a collection of documents.

    Each document is a list of tokens.

    Args:
        docs (list): documents to add.
    """
    for document in docs:
        self._token_count.update(self.process_token(tok) for tok in document)
145,776
Get the list of token_id given doc. Args: doc (list): document. Returns: list: int id of doc.
def doc2id(self, doc):
    """Convert a document (list of tokens) to a list of token ids.

    Args:
        doc (list): document.

    Returns:
        list: int ids of the document's tokens.
    """
    return [self.token_to_id(self.process_token(tok)) for tok in doc]
145,777
Get the token_id of given token. Args: token (str): token from vocabulary. Returns: int: int id of token.
def token_to_id(self, token):
    """Return the id of *token*.

    Unknown tokens map to the last slot of the id table.

    Args:
        token (str): token from vocabulary.

    Returns:
        int: id of the token.
    """
    token = self.process_token(token)
    unk_id = len(self._token2id) - 1
    return self._token2id.get(token, unk_id)
145,779
Probability estimates. The returned estimates for all classes are ordered by the label of classes. Args: text : string, the input text. Returns: y : array-like, shape = [num_words, num_classes] Returns the probability of the word for each class in the model,
def predict_proba(self, text):
    """Return per-word class probability estimates for *text*.

    Args:
        text (str): the input text.

    Returns:
        array-like, shape = [num_words, num_classes]: probability of
        each word for each class in the model.
    """
    assert isinstance(text, str)
    tokens = self.tokenizer(text)
    features = self.preprocessor.transform([tokens])
    batch_pred = self.model.predict(features)
    return batch_pred[0]  # drop the batch dimension
145,783
Predict using the model. Args: text: string, the input text. Returns: tags: list, shape = (num_words,) Returns predicted values.
def predict(self, text):
    """Predict the tag sequence for *text*.

    Args:
        text (str): the input text.

    Returns:
        list, shape = (num_words,): predicted tags.
    """
    probabilities = self.predict_proba(text)
    return self._get_tags(probabilities)
145,787
Pads nested sequences to the same length. This function transforms a list of list sequences into a 3D Numpy array of shape `(num_samples, max_sent_len, max_word_len)`. Args: sequences: List of lists of lists. dtype: Type of the output sequences. # Returns x: Numpy array.
def pad_nested_sequences(sequences, dtype='int32'):
    """Pad nested sequences to a common length.

    Transforms a list of list-of-lists into a 3D numpy array of shape
    `(num_samples, max_sent_len, max_word_len)`, zero-padded on the
    right.

    Args:
        sequences: list of lists of lists.
        dtype: type of the output array.

    Returns:
        numpy array.
    """
    max_sent_len = max((len(sent) for sent in sequences), default=0)
    max_word_len = max(
        (len(word) for sent in sequences for word in sent), default=0
    )
    padded = np.zeros((len(sequences), max_sent_len, max_word_len), dtype=dtype)
    for i, sent in enumerate(sequences):
        for j, word in enumerate(sent):
            padded[i, j, :len(word)] = word
    return padded
145,789
Create a preprocessor object. Args: lower: boolean. Whether to convert the texts to lowercase. use_char: boolean. Whether to use char feature. num_norm: boolean. Whether to normalize text. initial_vocab: Iterable. Initial vocabulary for expanding word_vocab.
def __init__(self, lower=True, num_norm=True, use_char=True, initial_vocab=None):
    """Create a preprocessor object.

    Args:
        lower: whether to lowercase words in the word vocabulary.
        num_norm: whether to normalize numbers in text.
        use_char: whether to build character features.
        initial_vocab: optional iterable used to expand the word vocab.
    """
    self._num_norm = num_norm
    self._use_char = use_char
    self._word_vocab = Vocabulary(lower=lower)
    self._char_vocab = Vocabulary(lower=False)
    # Labels are closed-class: case-sensitive and no unknown token.
    self._label_vocab = Vocabulary(lower=False, unk_token=False)
    if initial_vocab:
        # Seed the word vocab with the iterable as one document, and
        # the char vocab with each word treated as a char sequence.
        self._word_vocab.add_documents([initial_vocab])
        self._char_vocab.add_documents(initial_vocab)
145,790
Learn vocabulary from training set. Args: X : iterable. An iterable which yields either str, unicode or file objects. Returns: self : IndexTransformer.
def fit(self, X, y):
    """Learn word/char/label vocabularies from the training set.

    Args:
        X: iterable of token-list documents.
        y: iterable of label-list documents.

    Returns:
        self
    """
    self._word_vocab.add_documents(X)
    self._label_vocab.add_documents(y)
    if self._use_char:
        for document in X:
            self._char_vocab.add_documents(document)

    self._word_vocab.build()
    self._char_vocab.build()
    self._label_vocab.build()
    return self
145,791
Transform documents to document ids. Uses the vocabulary learned by fit. Args: X : iterable an iterable which yields either str, unicode or file objects. y : iterable, label strings. Returns: features: document id matrix. y: label id matrix.
def transform(self, X, y=None):
    """Transform documents to document id matrices.

    Uses the vocabularies learned by `fit`.

    Args:
        X: iterable of token-list documents.
        y: optional iterable of label-list documents.

    Returns:
        features: word id matrix (plus a char id tensor when char
            features are enabled).
        y: one-hot label id matrix (only returned when *y* is given).
    """
    word_ids = [self._word_vocab.doc2id(doc) for doc in X]
    word_ids = pad_sequences(word_ids, padding='post')

    if self._use_char:
        char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]
        char_ids = pad_nested_sequences(char_ids)
        features = [word_ids, char_ids]
    else:
        features = word_ids

    if y is not None:
        y = [self._label_vocab.doc2id(doc) for doc in y]
        y = pad_sequences(y, padding='post')
        y = to_categorical(y, self.label_size).astype(int)
        # In 2018/06/01, to_categorical is a bit strange.
        # >>> to_categorical([[1,3]], num_classes=4).shape
        # (1, 2, 4)
        # >>> to_categorical([[1]], num_classes=4).shape
        # (1, 4)
        # So, I expand dimensions when len(y.shape) == 2.
        y = y if len(y.shape) == 3 else np.expand_dims(y, axis=0)
        return features, y
    else:
        return features
145,792
Learn vocabulary and return document id matrix. This is equivalent to fit followed by transform. Args: X : iterable an iterable which yields either str, unicode or file objects. Returns: list : document id matrix. list: label id matrix.
def fit_transform(self, X, y=None, **params):
    """Learn vocabulary and return the document id matrix.

    Equivalent to fit followed by transform.

    Args:
        X: iterable of documents.
        y: optional iterable of label documents.

    Returns:
        document id matrix (and label id matrix when *y* is given).
    """
    fitted = self.fit(X, y)
    return fitted.transform(X, y)
145,793
Return label strings. Args: y: label id matrix. lengths: sentences length. Returns: list: list of list of strings.
def inverse_transform(self, y, lengths=None):
    """Convert label-probability matrices back to label strings.

    Args:
        y: label id matrix (argmax is taken over the last axis).
        lengths: optional true sentence lengths used to trim padding.

    Returns:
        list: list of lists of label strings.
    """
    ids = np.argmax(y, -1)
    docs = [self._label_vocab.id2doc(row) for row in ids]
    if lengths is not None:
        docs = [doc[:n] for doc, n in zip(docs, lengths)]
    return docs
145,794
Transform documents to document ids. Uses the vocabulary learned by fit. Args: X : iterable an iterable which yields either str, unicode or file objects. y : iterable, label strings. Returns: features: document id matrix. y: label id matrix.
def transform(self, X, y=None):
    """Transform documents to id matrices plus ELMo embeddings.

    Uses the vocabularies learned by `fit`.

    Args:
        X: iterable of token-list documents.
        y: optional iterable of label-list documents.

    Returns:
        features: [word id matrix, char id tensor, elmo embeddings].
        y: one-hot label id matrix (only returned when *y* is given).
    """
    word_ids = [self._word_vocab.doc2id(doc) for doc in X]
    word_ids = pad_sequences(word_ids, padding='post')

    char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]
    char_ids = pad_nested_sequences(char_ids)

    character_ids = batch_to_ids(X)
    # Take the second ELMo representation layer and detach it from the
    # autograd graph before converting to numpy.
    elmo_embeddings = self._elmo(character_ids)['elmo_representations'][1]
    elmo_embeddings = elmo_embeddings.detach().numpy()

    features = [word_ids, char_ids, elmo_embeddings]

    if y is not None:
        y = [self._label_vocab.doc2id(doc) for doc in y]
        y = pad_sequences(y, padding='post')
        y = to_categorical(y, self.label_size).astype(int)
        # In 2018/06/01, to_categorical is a bit strange.
        # >>> to_categorical([[1,3]], num_classes=4).shape
        # (1, 2, 4)
        # >>> to_categorical([[1]], num_classes=4).shape
        # (1, 4)
        # So, I expand dimensions when len(y.shape) == 2.
        y = y if len(y.shape) == 3 else np.expand_dims(y, axis=0)
        return features, y
    else:
        return features
145,796
Returns the prediction of the model on the given test data. Args: x_test : array-like, shape = (n_samples, sent_length) Test samples. Returns: y_pred : array-like, shape = (n_smaples, sent_length) Prediction labels for x.
def predict(self, x_test):
    """Return the model's predictions on the given test data.

    Args:
        x_test: array-like, shape = (n_samples, sent_length).

    Returns:
        y_pred: predicted label sequences, one per sample.

    Raises:
        OSError: when no model has been loaded.
    """
    if not self.model:
        raise OSError('Could not find a model. Call load(dir_path).')
    lengths = map(len, x_test)
    features = self.p.transform(x_test)
    predictions = self.model.predict(features)
    return self.p.inverse_transform(predictions, lengths)
145,801
Analyze text and return pretty format. Args: text: string, the input text. tokenizer: Tokenize input sentence. Default tokenizer is `str.split`. Returns: res: dict.
def analyze(self, text, tokenizer=str.split):
    """Analyze text and return a pretty-format result.

    Args:
        text (str): the input text.
        tokenizer: callable that splits a sentence into tokens
            (default: `str.split`).

    Returns:
        dict
    """
    if not self.tagger:
        # Lazily build the tagger the first time it is needed.
        self.tagger = Tagger(
            self.model, preprocessor=self.p, tokenizer=tokenizer
        )
    return self.tagger.analyze(text)
145,802
Returns a notebook object with papermill metadata loaded from the specified path. Args: notebook_path (str): Path to the notebook file. Returns: nbformat.NotebookNode
def load_notebook_node(notebook_path):
    """Return a notebook object with papermill metadata loaded from *notebook_path*.

    Ensures the notebook-level and per-cell 'papermill' metadata
    entries exist so later steps can assume their presence.

    Args:
        notebook_path (str): Path to the notebook file.

    Returns:
        nbformat.NotebookNode
    """
    nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)

    if not hasattr(nb.metadata, 'papermill'):
        nb.metadata['papermill'] = {
            'parameters': dict(),
            'environment_variables': dict(),
            'version': __version__,
        }

    for cell in nb.cells:
        if not hasattr(cell.metadata, 'tags'):
            cell.metadata['tags'] = []  # Create tags attr if one doesn't exist.
        if not hasattr(cell.metadata, 'papermill'):
            cell.metadata['papermill'] = dict()
    return nb
146,190
Download Malmo from github and optionally build the Minecraft Mod. Args: branch: optional branch to clone. Default is release version. buildMod: don't build the Mod unless build arg is given as True. Returns: The path for the Malmo Minecraft mod.
def download(branch=None, buildMod=False):
    """Download Malmo from github and optionally build the Minecraft Mod.

    Args:
        branch: optional branch to clone. Default is the release version.
        buildMod: the Mod is only built when this is True.

    Returns:
        The path for the Malmo Minecraft mod.
    """
    gradlew = "./gradlew"
    if os.name == 'nt':
        gradlew = "gradlew.bat"
    if branch is None:
        branch = malmo_version

    subprocess.check_call(["git", "clone", "-b", branch,
                           "https://github.com/Microsoft/malmo.git",
                           malmo_install_dir])

    os.chdir(malmo_install_dir)
    os.chdir("Minecraft")
    try:
        # Create the version properties file.
        pathlib.Path("src/main/resources/version.properties").write_text(
            "malmomod.version={}\n".format(malmo_version))
        # Optionally do a test build.
        if buildMod:
            subprocess.check_call([gradlew, "setupDecompWorkspace", "build",
                                   "testClasses", "-x", "test", "--stacktrace",
                                   "-Pversion={}".format(malmo_version)])
        # NOTE(review): in the original flattened source the indentation of
        # this assignment is ambiguous; if it were nested under `if buildMod:`
        # the final `return minecraft_dir` would raise NameError when
        # buildMod is False — confirm against upstream.
        minecraft_dir = os.getcwd()
    finally:
        os.chdir("../..")

    if "MALMO_XSD_PATH" not in os.environ:
        print("Please make sure you set the MALMO_XSD_PATH environment "
              "variable to \"{}/Schemas\"!"
              .format(str(pathlib.Path(malmo_install_dir).absolute())))
    return minecraft_dir
147,360
Launch Malmo Minecraft Mod in one or more clients from the Minecraft directory on the (optionally) given ports. Args: ports: an optional list of ports to start minecraft clients on. Defaults to a single Minecraft client on port 10000. wait_timeout: optional time in seconds to wait (defaults to 6 mins, i.e. 360 seconds).
def launch_minecraft(ports=None, wait_timeout=360):
    """Launch the Malmo Minecraft Mod in one or more clients.

    Runs from the Minecraft directory on the (optionally) given ports.

    Args:
        ports: optional list of ports to start Minecraft clients on.
            Defaults to a single client on port 10000.
        wait_timeout: time in seconds to wait (default 360).
    """
    # Bug fix: the original used a mutable default argument (ports=[]),
    # which is shared across calls; use None and build a fresh list.
    if ports is None:
        ports = []
    if "MALMO_XSD_PATH" not in os.environ:
        print("Please set the MALMO_XSD_PATH environment variable.")
        return
    cwd = os.getcwd()
    try:
        os.chdir(malmo_install_dir + "/Minecraft")
        launch_minecraft_in_background(os.getcwd(), ports, wait_timeout)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
147,361
Sets a mission running. Parameters: mission_spec : MissionSpec instance, specifying the mission. mission_record_spec : MissionRecordSpec instance, specifying what should be recorded. role : int, the index of the role this human agent is to play. Zero based.
def runMission(self, mission_spec, mission_record_spec, role = 0):
    """Sets a mission running.

    Parameters:
        mission_spec: MissionSpec instance, specifying the mission.
        mission_record_spec: MissionRecordSpec instance, specifying what
            should be recorded.
        role: int, the index of the role this human agent is to play.
            Zero based.
    """
    self.world_state = None
    total_reward = 0
    # decide on the action space
    command_handlers = mission_spec.getListOfCommandHandlers(role)
    if 'ContinuousMovement' in command_handlers and 'DiscreteMovement' in command_handlers:
        print('ERROR: Ambiguous action space in supplied mission: both continuous and discrete command handlers present.')
        exit(1)
    elif 'ContinuousMovement' in command_handlers:
        self.action_space = 'continuous'
    elif 'DiscreteMovement' in command_handlers:
        self.action_space = 'discrete'
    else:
        print('ERROR: Unknown action space in supplied mission: neither continuous or discrete command handlers present.')
        exit(1)
    self.createGUI()
    if mission_spec.isVideoRequested(0):
        self.canvas.config( width=mission_spec.getVideoWidth(0), height=mission_spec.getVideoHeight(0) )
    # show the mission summary for ~4 seconds with a countdown overlay
    start_time = time.time()
    while time.time() - start_time < 4:
        canvas_id = self.canvas.create_rectangle(100, 100, 540, 200, fill="white", outline="red", width="5")
        self.canvas.create_text(320, 120, text=mission_spec.getSummary(), font=('Helvetica', '16'))
        self.canvas.create_text(320, 150, text=str(3 - int(time.time() - start_time)), font=('Helvetica', '16'), fill="red")
        self.root.update()
        time.sleep(0.2)
    try:
        self.agent_host.startMission( mission_spec, mission_record_spec )
    except RuntimeError as e:
        tkinter.messagebox.showerror("Error","Error starting mission: "+str(e))
        return
    print("Waiting for the mission to start", end=' ')
    self.world_state = self.agent_host.peekWorldState()
    while not self.world_state.has_mission_begun:
        print(".", end="")
        time.sleep(0.1)
        self.world_state = self.agent_host.peekWorldState()
        # NOTE(review): in the flattened source it is ambiguous whether
        # this error loop is inside the wait loop — confirm upstream.
        for error in self.world_state.errors:
            print("Error:",error.text)
    print()
    if self.action_space == 'continuous':
        self.canvas.config(cursor='none')  # hide the mouse cursor while over the canvas
        self.canvas.event_generate('<Motion>', warp=True,
                                   x=old_div(self.canvas.winfo_width(),2),
                                   y=old_div(self.canvas.winfo_height(),2))  # put cursor at center
    self.root.after(50, self.update)
    self.canvas.focus_set()
    # main loop: pump observations, video frames and rewards to the GUI
    while self.world_state.is_mission_running:
        self.world_state = self.agent_host.getWorldState()
        if self.world_state.number_of_observations_since_last_state > 0:
            self.observation.config(text = self.world_state.observations[0].text )
        if mission_spec.isVideoRequested(0) and self.world_state.number_of_video_frames_since_last_state > 0:
            frame = self.world_state.video_frames[-1]
            image = Image.frombytes('RGB', (frame.width,frame.height), bytes(frame.pixels) )
            photo = ImageTk.PhotoImage(image)
            self.canvas.delete("all")
            self.canvas.create_image(old_div(frame.width,2), old_div(frame.height,2), image=photo)
            # draw a small white crosshair at the canvas center
            self.canvas.create_line( old_div(self.canvas.winfo_width(),2)-5, old_div(self.canvas.winfo_height(),2), old_div(self.canvas.winfo_width(),2)+6, old_div(self.canvas.winfo_height(),2), fill='white' )
            self.canvas.create_line( old_div(self.canvas.winfo_width(),2), old_div(self.canvas.winfo_height(),2)-5, old_div(self.canvas.winfo_width(),2), old_div(self.canvas.winfo_height(),2)+6, fill='white' )
        # parse reward
        for reward in self.world_state.rewards:
            total_reward += reward.getValue()
        self.reward.config(text = str(total_reward) )
        self.root.update()
        time.sleep(0.01)
    if self.action_space == 'continuous':
        self.canvas.config(cursor='arrow')  # restore the mouse cursor
    print('Mission stopped')
    if not self.agent_host.receivedArgument("test"):
        tkinter.messagebox.showinfo("Mission ended","Mission has ended. Total reward: " + str(total_reward) )
    self.root_frame.destroy()
147,407
Download Malmo from github and build (by default) the Minecraft Mod. Example usage: import malmoenv.bootstrap; malmoenv.bootstrap.download() Args: branch: optional branch to clone. TODO Default is release version. build: build the Mod unless build arg is given as False. installdir: the install dir name. Defaults to MalmoPlatform. Returns: The path for the Malmo Minecraft mod.
def download(branch=None, build=True, installdir="MalmoPlatform"):
    """Download Malmo from github and (by default) build the Minecraft Mod.

    Example usage: import malmoenv.bootstrap; malmoenv.bootstrap.download()

    Args:
        branch: optional branch to clone; defaults to the release version.
        build: build the Mod unless given as False.
        installdir: the install dir name. Defaults to MalmoPlatform.

    Returns:
        The path for the Malmo Minecraft mod.
    """
    checkout = malmo_version if branch is None else branch
    subprocess.check_call(
        ["git", "clone", "-b", checkout,
         "https://github.com/Microsoft/malmo.git", installdir])
    return setup(build=build, installdir=installdir)
147,480
Launch Minecraft listening for malmoenv connections. Args: port: the TCP port to listen on. installdir: the install dir name. Defaults to MalmoPlatform. Must be same as given (or defaulted) in download call if used. replaceable: whether or not to automatically restart Minecraft (default is false).
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
    """Launch Minecraft listening for malmoenv connections.

    Args:
        port: the TCP port to listen on.
        installdir: the install dir name; must match the one given (or
            defaulted) in the download call, if used.
        replaceable: whether to automatically restart Minecraft.
    """
    launch_script = 'launchClient.bat' if os.name == 'nt' else './launchClient.sh'
    cwd = os.getcwd()
    os.chdir(installdir)
    os.chdir("Minecraft")
    try:
        cmd = [launch_script, '-port', str(port), '-env']
        if replaceable:
            cmd.append('-replaceable')
        subprocess.check_call(cmd)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
147,482
Load a theme from the specified configuration file. Parameters: filename: The name of the filename to load. source: A description of where the theme was loaded from.
def from_file(cls, filename, source):
    """Load a theme from the specified configuration file.

    Parameters:
        filename: The name of the filename to load.
        source: A description of where the theme was loaded from.

    Raises:
        ConfigError: when the file cannot be parsed or is missing the
            [theme] section.
    """
    _logger.info('Loading theme %s', filename)

    try:
        config = configparser.ConfigParser()
        config.optionxform = six.text_type  # Preserve case
        with codecs.open(filename, encoding='utf-8') as fp:
            config.readfp(fp)
    except configparser.ParsingError as e:
        raise ConfigError(e.message)

    if not config.has_section('theme'):
        raise ConfigError(
            'Error loading {0}:\n'
            ' missing [theme] section'.format(filename))

    # The theme name is the filename without its extension.
    theme_name = os.path.basename(filename)
    theme_name, _ = os.path.splitext(theme_name)

    elements = {}
    for element, line in config.items('theme'):
        if element not in cls.DEFAULT_ELEMENTS:
            # Could happen if using a new config with an older version
            # of the software
            _logger.info('Skipping element %s', element)
            continue
        elements[element] = cls._parse_line(element, line, filename)

    return cls(name=theme_name, source=source, elements=elements)
147,722
Converts hex RGB to the 6x6x6 xterm color space Args: color (str): RGB color string in the format "#RRGGBB" Returns: str: ansi color string in the format "ansi_n", where n is between 16 and 230 Reference: https://github.com/chadj2/bash-ui/blob/master/COLORS.md
def rgb_to_ansi(color):
    """Convert hex RGB to the 6x6x6 xterm color cube.

    Args:
        color (str): RGB color string in the format "#RRGGBB".

    Returns:
        str: ansi color string "ansi_n" with n in the 16..231 cube
        range, or None for malformed input.

    Reference:
        https://github.com/chadj2/bash-ui/blob/master/COLORS.md
    """
    # Bug fix: check the length before indexing — the original read
    # color[0] first and raised IndexError on an empty string.
    if len(color) != 7 or color[0] != '#':
        return None
    try:
        # Normalize each channel to 0-5 (255 / 51 == 5).
        r = round(int(color[1:3], 16) / 51.0)
        g = round(int(color[3:5], 16) / 51.0)
        b = round(int(color[5:7], 16) / 51.0)
        n = int(36 * r + 6 * g + b + 16)
        return 'ansi_{0:d}'.format(n)
    except ValueError:
        return None
147,725
Overlay a message box on the center of the screen and wait for input. Params: message (list or string): List of strings, one per line. timeout (float): Optional, maximum length of time that the message will be shown before disappearing. style (str): The theme element that will be applied to the notification window
def show_notification(self, message, timeout=None, style='Info'):
    """Overlay a message box on the center of the screen and wait for input.

    Params:
        message (list or string): List of strings, one per line.
        timeout (float): Optional, maximum length of time that the
            message will be shown before disappearing.
        style (str): The theme element that will be applied to the
            notification window.

    Returns:
        The key code pressed, or -1 if the timeout expired first.
    """
    assert style in ('Info', 'Warning', 'Error', 'Success')

    if isinstance(message, six.string_types):
        message = message.splitlines()

    n_rows, n_cols = self.stdscr.getmaxyx()
    v_offset, h_offset = self.stdscr.getbegyx()

    # Box size: +2 on each dimension for the border.
    box_width = max(len(m) for m in message) + 2
    box_height = len(message) + 2

    # Cut off the lines of the message that don't fit on the screen
    box_width = min(box_width, n_cols)
    box_height = min(box_height, n_rows)
    message = message[:box_height - 2]

    # Center the box within the visible screen area.
    s_row = (n_rows - box_height) // 2 + v_offset
    s_col = (n_cols - box_width) // 2 + h_offset

    window = curses.newwin(box_height, box_width, s_row, s_col)
    window.bkgd(str(' '), self.attr('Notice{0}'.format(style)))
    window.erase()
    window.border()
    for index, line in enumerate(message, start=1):
        self.add_line(window, line, index, 1)
    window.refresh()

    # Poll for a keypress until the timeout (if any) expires.
    ch, start = -1, time.time()
    with self.no_delay():
        while timeout is None or time.time() - start < timeout:
            ch = self.getch()
            if ch != -1:
                break
            time.sleep(0.01)

    window.clear()
    del window
    self.stdscr.touchwin()
    self.stdscr.refresh()
    return ch
147,811
Search through the mime handlers list and attempt to find the appropriate command to open the provided url with. Will raise a MailcapEntryNotFound exception if no valid command exists. Params: url (text): URL that will be checked Returns: command (text): The string of the command that should be executed in a subprocess to open the resource. entry (dict): The full mailcap entry for the corresponding command
def get_mailcap_entry(self, url):
    """Find the mailcap command to open the provided url.

    Searches through the mime handlers list for a parser matching the
    url. Raises a MailcapEntryNotFound exception if no valid command
    exists.

    Params:
        url (text): URL that will be checked.

    Returns:
        command (text): Command to execute in a subprocess to open the
            resource.
        entry (dict): The full mailcap entry for the command.
    """
    for parser in mime_parsers.parsers:
        if parser.pattern.match(url):
            # modified_url may be the same as the original url, but it
            # could also be updated to point to a different page, or it
            # could refer to the location of a temporary file with the
            # page's downloaded content.
            try:
                modified_url, content_type = parser.get_mimetype(url)
            except Exception as e:
                # If Imgur decides to change its html layout, let it fail
                # silently in the background instead of crashing.
                _logger.warning('parser %s raised an exception', parser)
                _logger.exception(e)
                raise exceptions.MailcapEntryNotFound()

            if not content_type:
                _logger.info('Content type could not be determined')
                raise exceptions.MailcapEntryNotFound()
            elif content_type == 'text/html':
                _logger.info('Content type text/html, deferring to browser')
                raise exceptions.MailcapEntryNotFound()

            command, entry = mailcap.findmatch(
                self._mailcap_dict, content_type, filename=modified_url)
            if not entry:
                _logger.info('Could not find a valid mailcap entry')
                raise exceptions.MailcapEntryNotFound()
            return command, entry

    # No parsers matched the url
    raise exceptions.MailcapEntryNotFound()
147,816
Display a text prompt at the bottom of the screen. Params: prompt (string): Text prompt that will be displayed key (bool): If true, grab a single keystroke instead of a full string. This can be faster than pressing enter for single key prompts (e.g. y/n?)
def prompt_input(self, prompt, key=False):
    """Display a text prompt at the bottom of the screen.

    Params:
        prompt (string): Text prompt that will be displayed.
        key (bool): If true, grab a single keystroke instead of a full
            string. This can be faster than pressing enter for single
            key prompts (e.g. y/n?).

    Returns:
        The entered text (or keystroke), or None when escape was
        pressed in key mode.
    """
    n_rows, n_cols = self.stdscr.getmaxyx()
    v_offset, h_offset = self.stdscr.getbegyx()

    ch, attr = str(' '), self.attr('Prompt')
    prompt = self.clean(prompt, n_cols - 1)

    # Create a new window to draw the text at the bottom of the screen,
    # so we can erase it when we're done.
    s_row = v_offset + n_rows - 1
    s_col = h_offset
    prompt_win = curses.newwin(1, len(prompt) + 1, s_row, s_col)
    prompt_win.bkgd(ch, attr)
    self.add_line(prompt_win, prompt)
    prompt_win.refresh()

    # Create a separate window for text input
    s_col = h_offset + len(prompt)
    input_win = curses.newwin(1, n_cols - len(prompt), s_row, s_col)
    input_win.bkgd(ch, attr)
    input_win.refresh()

    if key:
        self.curs_set(1)
        ch = self.getch()
        # We can't convert the character to unicode, because it may return
        # Invalid values for keys that don't map to unicode characters,
        # e.g. F1
        text = ch if ch != self.ESCAPE else None
        self.curs_set(0)
    else:
        text = self.text_input(input_win)

    prompt_win.clear()
    input_win.clear()
    del prompt_win
    del input_win
    self.stdscr.touchwin()
    self.stdscr.refresh()

    return text
147,822
Sample this motion track into discretized motion events. Args: contact_id: contact point id accuracy: motion minimum difference in space dt: sample time difference
def discretize(self, contact_id=0, accuracy=0.004, dt=0.001):
    """Sample this motion track into discretized motion events.

    Args:
        contact_id: contact point id.
        accuracy: motion minimum difference in space.
        dt: sample time difference.

    Returns:
        list of events: ['d', point, id] down, ['m', point, id] move,
        ['s', dt] sleep, ['u', id] up.
    """
    if not self.event_points:
        return []

    events = []
    # Time needed to move `accuracy` distance at the track's speed.
    action_dt = accuracy / self.speed
    dt = dt or action_dt

    # Idle until the first event point's timestamp, then touch down.
    ep0 = self.event_points[0]
    for _ in range(int(ep0[0] / dt)):
        events.append(['s', dt])
    events.append(['d', ep0[1], contact_id])

    # Each event point is (timestamp, point); walk consecutive pairs.
    for i, ep in enumerate(self.event_points[1:]):
        prev_ts = self.event_points[i][0]
        curr_ts = ep[0]
        p0 = self.event_points[i][1]
        p1 = ep[1]
        if p0 == p1:
            # hold
            for _ in range(int((curr_ts - prev_ts) / dt)):
                events.append(['s', dt])
        else:
            # move
            dpoints = track_sampling([p0, p1], accuracy)
            for p in dpoints:
                events.append(['m', p, contact_id])
                # NOTE(review): in the flattened source the nesting of
                # this sleep loop is ambiguous (per-move vs once after
                # all moves) — confirm against upstream.
                for _ in range(int(action_dt / dt)):
                    events.append(['s', dt])

    events.append(['u', contact_id])
    return events
148,195
Similar to swipe action, but the end point is provide by a UI proxy or by fixed coordinates. Args: target (:py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`): a UI proxy or 2-list/2-tuple coordinates (x, y) in NormalizedCoordinate system duration (:py:obj:`float`): time interval in which the action is performed Raises: PocoNoSuchNodeException: raised when the UI element does not exist
def drag_to(self, target, duration=2.0):
    """Drag from this UI element to *target*.

    Similar to the swipe action, but the end point is provided by a UI
    proxy or by fixed coordinates.

    Args:
        target: a UI proxy or 2-list/2-tuple coordinates (x, y) in
            NormalizedCoordinate system.
        duration (float): time interval in which the action is performed.

    Raises:
        ValueError: when *duration* cannot be converted to float.
        PocoNoSuchNodeException: when the UI element does not exist.
    """
    try:
        duration = float(duration)
    except ValueError:
        raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))

    # isinstance instead of `type(...) in (...)`: idiomatic, and also
    # accepts list/tuple subclasses.
    if isinstance(target, (list, tuple)):
        target_pos = target
    else:
        target_pos = target.get_position()
    origin_pos = self.get_position()
    dir_ = [target_pos[0] - origin_pos[0], target_pos[1] - origin_pos[1]]
    return self.swipe(dir_, duration=duration)
148,302
Get a new UI proxy copy with the given focus. Return a new UI proxy object as the UI proxy is immutable. Args: f (2-:obj:`tuple`/2-:obj:`list`/:obj:`str`): the focus point, it can be specified as 2-list/2-tuple coordinates (x, y) in NormalizedCoordinate system or as 'center' or 'anchor'. Returns: :py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`: a new UI proxy object (copy)
def focus(self, f):
    """Return a copy of this UI proxy with the given focus point.

    The UI proxy is immutable, so a new copy is returned.

    Args:
        f: the focus point — 2-list/2-tuple coordinates (x, y) in
            NormalizedCoordinate system, or 'center' or 'anchor'.

    Returns:
        a new UI proxy object (copy).
    """
    duplicate = copy.copy(self)
    duplicate._focus = f
    return duplicate
148,305
Get the position of the UI elements. Args: focus: focus point of UI proxy, see :py:meth:`.focus() <poco.proxy.UIObjectProxy.focus>` for more details Returns: 2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system Raises: TypeError: raised when unsupported focus type is specified
def get_position(self, focus=None):
    """Get the position of the UI element.

    Args:
        focus: focus point of the UI proxy; falls back to this proxy's
            focus, then to 'anchor'.

    Returns:
        2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system.

    Raises:
        TypeError: when an unsupported focus type is specified.
    """
    focus = focus or self._focus or 'anchor'
    if focus == 'anchor':
        return self.attr('pos')
    if focus == 'center' or type(focus) in (list, tuple):
        fx, fy = (0.5, 0.5) if focus == 'center' else focus
        x, y = self.attr('pos')
        w, h = self.get_size()
        ap_x, ap_y = self.attr("anchorPoint")
        # Offset the anchor position by the focus point relative to the
        # element's anchor point, scaled by the element's size.
        return [x + w * (fx - ap_x), y + h * (fy - ap_y)]
    raise TypeError('Unsupported focus type {}. '
                    'Only "anchor/center" or 2-list/2-tuple available.'.format(type(focus)))
148,306
Block and wait for max given time before the UI element appears. Args: timeout: maximum waiting time in seconds Returns: :py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`: self
def wait(self, timeout=3):
    """Block for at most *timeout* seconds until the UI element appears.

    Args:
        timeout: maximum waiting time in seconds.

    Returns:
        self
    """
    deadline = time.time() + timeout
    while not self.exists():
        self.poco.sleep_for_polling_interval()
        if time.time() > deadline:
            break
    return self
148,308
Block and wait until the UI element **disappears** within the given timeout. Args: timeout: maximum waiting time in seconds Raises: PocoTargetTimeout: when timeout
def wait_for_disappearance(self, timeout=120):
    """Block until the UI element disappears, within the given timeout.

    Args:
        timeout: maximum waiting time in seconds.

    Raises:
        PocoTargetTimeout: when the element is still present after
            *timeout* seconds.
    """
    deadline = time.time() + timeout
    while self.exists():
        self.poco.sleep_for_polling_interval()
        if time.time() > deadline:
            raise PocoTargetTimeout('disappearance', self)
148,309
Change the attribute value of the UI element. Not all attributes can be casted to text. If changing the immutable attributes or attributes which do not exist, the InvalidOperationException exception is raised. Args: name: attribute name val: new attribute value to cast Raises: InvalidOperationException: when it fails to set the attribute on UI element
def setattr(self, name, val):
    """Change the attribute value of the UI element.

    Not all attributes are mutable; changing an immutable or non-existent
    attribute raises InvalidOperationException.

    Args:
        name: attribute name
        val: new attribute value to set

    Raises:
        InvalidOperationException: when the attribute cannot be set.
    """
    # resolve this proxy to a single concrete node first
    nodes = self._do_query(multiple=False)
    try:
        return self.poco.agent.hierarchy.setAttr(nodes, name, val)
    except UnableToSetAttributeException as e:
        # re-raise with element context so the caller knows which UI object failed
        raise InvalidOperationException('"{}" of "{}"'.format(str(e), self))
148,311
Similar to click but press the screen for the given time interval and then release Args: pos (:obj:`2-list/2-tuple`): coordinates (x, y) in range from 0 to 1 duration: duration of press the screen
def long_click(self, pos, duration=2.0):
    """Press the screen at *pos* for *duration* seconds, then release.

    Args:
        pos (:obj:`2-list/2-tuple`): coordinates (x, y), each in [0, 1]
        duration: press duration in seconds

    Raises:
        ValueError: if *duration* cannot be converted to float.
        InvalidOperationException: if *pos* lies outside the screen.
    """
    try:
        duration = float(duration)
    except (ValueError, TypeError):
        # bug fix: float() raises TypeError for non-numeric types (e.g. None),
        # ValueError for unparsable strings -- normalize both to ValueError
        raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
    if not (0 <= pos[0] <= 1) or not (0 <= pos[1] <= 1):
        raise InvalidOperationException('Click position out of screen. {}'.format(repr(pos)))
    return self.agent.input.longClick(pos[0], pos[1], duration)
148,344
Scroll from the lower part to the upper part of the entire screen. Args: direction (:py:obj:`str`): scrolling direction. "vertical" or "horizontal" percent (:py:obj:`float`): scrolling distance percentage of the entire screen height or width according to direction duration (:py:obj:`float`): time interval in which the action is performed
def scroll(self, direction='vertical', percent=0.6, duration=2.0):
    """Scroll the entire screen from its lower part to its upper part.

    Args:
        direction (:py:obj:`str`): "vertical" or "horizontal"
        percent (:py:obj:`float`): scrolling distance as a fraction of the
            screen height/width (depending on *direction*)
        duration (:py:obj:`float`): time interval of the action

    Raises:
        ValueError: on an unsupported direction.
    """
    if direction not in ('vertical', 'horizontal'):
        raise ValueError('Argument `direction` should be one of "vertical" or "horizontal". Got {}'
                         .format(repr(direction)))
    origin = [0.5, 0.5]
    offset = percent / 2
    if direction == 'vertical':
        # start below the center, swipe upwards
        origin[1] += offset
        vector = [0, -percent]
    else:
        # start right of the center, swipe leftwards
        origin[0] += offset
        vector = [-percent, 0]
    return self.swipe(origin, direction=vector, duration=duration)
148,345
Squeezing or expanding 2 fingers on the entire screen. Args: direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding percent (:py:obj:`float`): squeezing range from or expanding range to of the entire screen duration (:py:obj:`float`): time interval in which the action is performed dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``
def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):
    """Squeeze ("in") or expand ("out") two fingers on the entire screen.

    Args:
        direction (:py:obj:`str`): "in" for squeezing, "out" for expanding
        percent (:py:obj:`float`): outer radius of the pinch gesture, as a
            fraction of the entire screen
        duration (:py:obj:`float`): time interval of the action
        dead_zone (:py:obj:`float`): inner circle radius; must be smaller
            than *percent*

    Raises:
        ValueError: on invalid *direction*, or when dead_zone >= percent.
    """
    if direction not in ('in', 'out'):
        raise ValueError('Argument `direction` should be one of "in" or "out". Got {}'.format(repr(direction)))
    if dead_zone >= percent:
        # bug fix: corrected "dead_zoon" typo in the original error message
        raise ValueError('Argument `dead_zone` should not be greater than `percent`. dead_zone={}, percent={}'
                         .format(repr(dead_zone), repr(percent)))
    tracks = make_pinching(direction, [0.5, 0.5], [1, 1], percent, dead_zone, duration)
    speed = (percent - dead_zone) / 2 / duration
    # slower gestures need finer accuracy, which helps keep the motion precise
    ret = self.apply_motion_tracks(tracks, accuracy=speed * 0.03)
    return ret
148,346
Apply a list of motion tracks to the device as low-level motion events. Args: tracks (:py:obj:`list`): list of :py:class:`poco.utils.track.MotionTrack` object accuracy (:py:obj:`float`): motion accuracy for each motion steps in normalized coordinate metrics.
def apply_motion_tracks(self, tracks, accuracy=0.004):
    """Apply a batch of motion tracks to the device as low-level motion events.

    Args:
        tracks (:py:obj:`list`): list of :py:class:`poco.utils.track.MotionTrack` objects
        accuracy (:py:obj:`float`): motion accuracy per step in normalized coordinates

    Raises:
        ValueError: when *tracks* is empty.
    """
    if not tracks:
        raise ValueError('Please provide at least one track. Got {}'.format(repr(tracks)))
    batch = MotionTrackBatch(tracks)
    events = batch.discretize(accuracy)
    return self.agent.input.applyMotionEvents(events)
148,347
This is only a slot to store and get already initialized poco instance rather than initializing again. You can simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance. If no such AndroidUiautomationPoco instance, a new instance will be created and stored. Args: device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc`` Returns: poco instance
def get_instance(cls, device):
    """Return the cached poco instance for *device*, creating one if absent.

    This is only a slot to store and retrieve an already initialized poco
    instance rather than initializing it again.

    Args:
        device (:py:obj:`airtest.core.device.Device`): target device

    Returns:
        AndroidUiautomationPoco instance bound to *device*
    """
    instance = cls._nuis.get(device)
    if instance is None:
        instance = AndroidUiautomationPoco(device)
        cls._nuis[device] = instance
    return instance
148,370
Determine if a number of Suggested Actions are supported by a Channel. Args: channel_id (str): The Channel to check the if Suggested Actions are supported in. button_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel. Returns: bool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not support that number of Suggested Actions.
def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:
    """Determine whether a Channel supports *button_cnt* Suggested Actions.

    Args:
        channel_id (str): the Channel to check.
        button_cnt (int, optional): number of Suggested Actions to check for;
            defaults to 100.

    Returns:
        bool: True if the Channel supports that many Suggested Actions,
        False otherwise (including unknown channels).
    """
    max_actions = {
        # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies
        Channels.facebook: 10,
        Channels.skype: 10,
        # https://developers.line.biz/en/reference/messaging-api/#items-object
        Channels.line: 13,
        # https://dev.kik.com/#/docs/messaging#text-response-object
        Channels.kik: 20,
        Channels.telegram: 100,
        Channels.slack: 100,
        Channels.emulator: 100,
        Channels.direct_line: 100,
        Channels.webchat: 100,
    }
    limit = max_actions.get(channel_id)
    return limit is not None and button_cnt <= limit
149,655
Determine if a number of Card Actions are supported by a Channel. Args: channel_id (str): The Channel to check if the Card Actions are supported in. button_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel. Returns: bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support that number of Card Actions.
def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool:
    """Determine whether a Channel supports *button_cnt* Card Actions.

    Args:
        channel_id (str): the Channel to check.
        button_cnt (int, optional): number of Card Actions to check for;
            defaults to 100.

    Returns:
        bool: True if the Channel supports that many Card Actions,
        False otherwise (including unknown channels).
    """
    max_actions = {
        Channels.facebook: 3,
        Channels.skype: 3,
        Channels.ms_teams: 3,
        Channels.line: 99,
        Channels.slack: 100,
        Channels.emulator: 100,
        Channels.direct_line: 100,
        Channels.webchat: 100,
        Channels.cortana: 100,
    }
    limit = max_actions.get(channel_id)
    return limit is not None and button_cnt <= limit
149,656
Get the Channel Id from the current Activity on the Turn Context. Args: turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from. Returns: str: The Channel Id from the Turn Context's Activity.
def get_channel_id(turn_context: TurnContext) -> str: if turn_context.activity.channel_id is None: return "" else: return turn_context.activity.channel_id
149,657
run testcase or testsuite. Args: config (dict): testcase/testsuite config dict { "name": "ABC", "variables": {}, "setup_hooks", [], "teardown_hooks", [] } http_client_session (instance): requests.Session(), or locust.client.Session() instance.
def __init__(self, config, http_client_session=None):
    """Initialize a testcase/testsuite runner.

    Args:
        config (dict): testcase/testsuite config, e.g.
            {
                "name": "ABC",
                "variables": {},
                "setup_hooks": [],
                "teardown_hooks": []
            }
        http_client_session (instance): requests.Session() or
            locust.client.Session() instance; a fresh HttpSession is
            created when omitted.
    """
    self.verify = config.get("verify", True)
    self.output = config.get("output", [])
    self.validation_results = []
    config_variables = config.get("variables", {})
    # testcase setup hooks
    testcase_setup_hooks = config.get("setup_hooks", [])
    # testcase teardown hooks
    self.testcase_teardown_hooks = config.get("teardown_hooks", [])
    self.http_client_session = http_client_session or HttpSession()
    self.session_context = SessionContext(config_variables)
    # setup hooks run immediately, before any test step
    if testcase_setup_hooks:
        self.do_hook_actions(testcase_setup_hooks, "setup")
150,336
handle skip feature for test - skip: skip current test unconditionally - skipIf: skip current test if condition is true - skipUnless: skip current test unless condition is true Args: test_dict (dict): test info Raises: SkipTest: skip test
def _handle_skip_feature(self, test_dict):
    """Handle the skip feature for a test.

    - skip: skip unconditionally
    - skipIf: skip when the condition evaluates to True
    - skipUnless: skip when the condition evaluates to False

    Args:
        test_dict (dict): test info

    Raises:
        SkipTest: when the test should be skipped.
    """
    # TODO: move skip to initialize
    reason = None
    if "skip" in test_dict:
        # unconditional skip
        reason = test_dict["skip"]
    elif "skipIf" in test_dict:
        condition = test_dict["skipIf"]
        if self.session_context.eval_content(condition):
            reason = "{} evaluate to True".format(condition)
    elif "skipUnless" in test_dict:
        condition = test_dict["skipUnless"]
        if not self.session_context.eval_content(condition):
            reason = "{} evaluate to False".format(condition)
    if reason:
        raise SkipTest(reason)
150,339
call hook actions. Args: actions (list): each action in actions list maybe in two format. format1 (dict): assignment, the value returned by hook function will be assigned to variable. {"var": "${func()}"} format2 (str): only call hook functions. ${func()} hook_type (enum): setup/teardown
def do_hook_actions(self, actions, hook_type):
    """Execute setup/teardown hook actions.

    Args:
        actions (list): each action is one of two formats:
            - dict with a single item (assignment): the hook function's
              return value is assigned to the variable, e.g. {"var": "${func()}"}
            - str (plain call): just invoke the hook function, e.g. ${func()}
        hook_type (str): "setup" or "teardown" (used for logging only).
    """
    logger.log_debug("call {} hook actions.".format(hook_type))
    for action in actions:
        if isinstance(action, dict) and len(action) == 1:
            # format 1
            # {"var": "${func()}"}
            var_name, hook_content = list(action.items())[0]
            hook_content_eval = self.session_context.eval_content(hook_content)
            logger.log_debug(
                "assignment with hook: {} = {} => {}".format(
                    var_name, hook_content, hook_content_eval
                )
            )
            self.session_context.update_test_variables(
                var_name, hook_content_eval
            )
        else:
            # format 2
            logger.log_debug("call hook function: {}".format(action))
            # TODO: check hook function if valid
            self.session_context.eval_content(action)
150,340
prepare locust testcases Args: path (str): testcase file path. Returns: list: locust tests data [ testcase1_dict, testcase2_dict ]
def prepare_locust_tests(path):
    """Prepare locust testcases from *path*.

    Each testcase is repeated according to its config "weight" (default 1).

    Args:
        path (str): testcase file path.

    Returns:
        list: locust tests data [testcase1_dict, testcase2_dict, ...]
    """
    tests_mapping = loader.load_tests(path)
    parsed_testcases = parser.parse_tests(tests_mapping)

    locust_tests = []
    for parsed in parsed_testcases:
        weight = parsed.get("config", {}).pop("weight", 1)
        # repeat the testcase "weight" times to bias locust's selection
        locust_tests.extend([parsed] * weight)
    return locust_tests
150,345
initialize HttpRunner. Args: failfast (bool): stop the test run on the first error or failure. save_tests (bool): save loaded/parsed tests to JSON file. report_template (str): report template file path, template should be in Jinja2 format. report_dir (str): html report save directory. log_level (str): logging level. log_file (str): log file path.
def __init__(self, failfast=False, save_tests=False, report_template=None, report_dir=None, log_level="INFO", log_file=None):
    """Initialize HttpRunner.

    Args:
        failfast (bool): stop the test run on the first error or failure.
        save_tests (bool): save loaded/parsed tests to JSON file.
        report_template (str): report template file path (Jinja2 format).
        report_dir (str): html report save directory.
        log_level (str): logging level.
        log_file (str): log file path. NOTE: the logger is only configured
            when log_file is given; log_level alone has no effect here.
    """
    self.exception_stage = "initialize HttpRunner()"
    kwargs = {
        "failfast": failfast,
        "resultclass": report.HtmlTestResult
    }
    self.unittest_runner = unittest.TextTestRunner(**kwargs)
    self.test_loader = unittest.TestLoader()
    self.save_tests = save_tests
    self.report_template = report_template
    self.report_dir = report_dir
    self._summary = None
    if log_file:
        logger.setup_logger(log_level, log_file)
150,346
initialize testcase with Runner() and add to test suite. Args: testcases (list): testcases list. Returns: unittest.TestSuite()
def _add_tests(self, testcases):
    """Initialize each testcase with a Runner() and add it to a test suite.

    Args:
        testcases (list): parsed testcases list.

    Returns:
        unittest.TestSuite: one generated TestCase class per testcase, with
        one test method per (step index, repeat index) pair.
    """
    def _add_test(test_runner, test_dict):
        """Build a bound test method that runs *test_dict* via *test_runner*."""
        def test(self):
            try:
                test_runner.run_test(test_dict)
            except exceptions.MyBaseFailure as ex:
                self.fail(str(ex))
            finally:
                # expose the runner's collected meta data on the TestCase
                self.meta_datas = test_runner.meta_datas

        if "config" in test_dict:
            # run nested testcase
            test.__doc__ = test_dict["config"].get("name")
            variables = test_dict["config"].get("variables", {})
        else:
            # run api test
            test.__doc__ = test_dict.get("name")
            variables = test_dict.get("variables", {})

        if isinstance(test.__doc__, parser.LazyString):
            # the test name may itself reference variables; resolve it here
            parsed_variables = parser.parse_variables_mapping(variables, ignore=True)
            test.__doc__ = parser.parse_lazy_data(
                test.__doc__, parsed_variables)

        return test

    test_suite = unittest.TestSuite()
    for testcase in testcases:
        config = testcase.get("config", {})
        test_runner = runner.Runner(config)
        # fresh TestCase subclass per testcase so methods don't collide
        TestSequense = type('TestSequense', (unittest.TestCase,), {})

        tests = testcase.get("teststeps", [])
        for index, test_dict in enumerate(tests):
            for times_index in range(int(test_dict.get("times", 1))):
                # suppose one testcase should not have more than 9999 steps,
                # and one step should not run more than 999 times.
                test_method_name = 'test_{:04}_{:03}'.format(index, times_index)
                test_method = _add_test(test_runner, test_dict)
                setattr(TestSequense, test_method_name, test_method)

        loaded_testcase = self.test_loader.loadTestsFromTestCase(TestSequense)
        # attach context for later aggregation/reporting
        setattr(loaded_testcase, "config", config)
        setattr(loaded_testcase, "teststeps", tests)
        setattr(loaded_testcase, "runner", test_runner)
        test_suite.addTest(loaded_testcase)

    return test_suite
150,347
run tests in test_suite Args: test_suite: unittest.TestSuite() Returns: list: tests_results
def _run_suite(self, test_suite):
    """Run every testcase in *test_suite*.

    Args:
        test_suite: unittest.TestSuite()

    Returns:
        list: (testcase, result) tuples, one per testcase.
    """
    results = []
    for testcase in test_suite:
        name = testcase.config.get("name")
        logger.log_info("Start to run testcase: {}".format(name))
        outcome = self.unittest_runner.run(testcase)
        results.append((testcase, outcome))
    return results
150,348
aggregate results Args: tests_results (list): list of (testcase, result)
def _aggregate(self, tests_results):
    """Aggregate per-testcase results into one overall summary.

    Args:
        tests_results (list): list of (testcase, result) tuples.

    Returns:
        dict: overall summary with success flag, testcase/teststep stats,
        time, platform info and per-testcase details.
    """
    summary = {
        "success": True,
        "stat": {
            "testcases": {
                "total": len(tests_results),
                "success": 0,
                "fail": 0
            },
            "teststeps": {}
        },
        "time": {},
        "platform": report.get_platform(),
        "details": []
    }
    for tests_result in tests_results:
        testcase, result = tests_result
        testcase_summary = report.get_summary(result)
        if testcase_summary["success"]:
            summary["stat"]["testcases"]["success"] += 1
        else:
            summary["stat"]["testcases"]["fail"] += 1
        # overall run succeeds only if every testcase succeeded
        summary["success"] &= testcase_summary["success"]
        testcase_summary["name"] = testcase.config.get("name")
        testcase_summary["in_out"] = utils.get_testcase_io(testcase)
        # fold this testcase's step stats and timing into the totals
        report.aggregate_stat(summary["stat"]["teststeps"], testcase_summary["stat"])
        report.aggregate_stat(summary["time"], testcase_summary["time"])
        summary["details"].append(testcase_summary)
    return summary
150,349
run testcase/testsuite file or folder. Args: path (str): testcase/testsuite file/foler path. dot_env_path (str): specified .env file path. mapping (dict): if mapping is specified, it will override variables in config block. Returns: instance: HttpRunner() instance
def run_path(self, path, dot_env_path=None, mapping=None):
    """Run a testcase/testsuite file or folder.

    Args:
        path (str): testcase/testsuite file/folder path.
        dot_env_path (str): specified .env file path.
        mapping (dict): when given, overrides variables in the config block.

    Returns:
        instance: HttpRunner() instance
    """
    # load tests
    self.exception_stage = "load tests"
    tests_mapping = loader.load_tests(path, dot_env_path)
    tests_mapping["project_mapping"]["test_path"] = path
    if mapping:
        tests_mapping["project_mapping"]["variables"] = mapping
    return self.run_tests(tests_mapping)
150,351
main interface. Args: path_or_tests: str: testcase/testsuite file/foler path dict: valid testcase/testsuite data
def run(self, path_or_tests, dot_env_path=None, mapping=None):
    """Main interface: run tests from a path or from loaded test data.

    Args:
        path_or_tests:
            str: testcase/testsuite file/folder path
            dict: valid testcase/testsuite data

    Raises:
        exceptions.ParamsError: when *path_or_tests* is neither.
    """
    if validator.is_testcase_path(path_or_tests):
        return self.run_path(path_or_tests, dot_env_path, mapping)
    if validator.is_testcases(path_or_tests):
        return self.run_tests(path_or_tests)
    raise exceptions.ParamsError("Invalid testcase path or testcases: {}".format(path_or_tests))
150,352
convert dict to params string Args: src_dict (dict): source mapping data structure Returns: str: string params data Examples: >>> src_dict = { "a": 1, "b": 2 } >>> convert_dict_to_params(src_dict) >>> "a=1&b=2"
def convert_dict_to_params(src_dict):
    """Convert a dict to an URL params string.

    Args:
        src_dict (dict): source mapping data structure

    Returns:
        str: joined "key=value" pairs, e.g. {"a": 1, "b": 2} -> "a=1&b=2"
    """
    pairs = ("{}={}".format(key, value) for key, value in src_dict.items())
    return "&".join(pairs)
150,358
deepcopy dict data, ignore file object (_io.BufferedReader) Args: data (dict): dict data structure { 'a': 1, 'b': [2, 4], 'c': lambda x: x+1, 'd': open('LICENSE'), 'f': { 'f1': {'a1': 2}, 'f2': io.open('LICENSE', 'rb'), } } Returns: dict: deep copied dict data, with file object unchanged.
def deepcopy_dict(data):
    """Deep-copy dict data, leaving un-copyable values (e.g. open file
    objects such as _io.BufferedReader) unchanged.

    Args:
        data (dict): dict data structure, values may include file objects.

    Returns:
        dict: deep copied dict, with un-copyable values shared as-is.
    """
    try:
        return copy.deepcopy(data)
    except TypeError:
        # some value refuses deepcopy (e.g. a file object); copy per key
        result = {}
        for key, value in data.items():
            if isinstance(value, dict):
                result[key] = deepcopy_dict(value)
                continue
            try:
                result[key] = copy.deepcopy(value)
            except TypeError:
                result[key] = value
        return result
150,361
ensure variables are in mapping format. Args: variables (list/dict): original variables Returns: dict: ensured variables in dict format Examples: >>> variables = [ {"a": 1}, {"b": 2} ] >>> print(ensure_mapping_format(variables)) { "a": 1, "b": 2 }
def ensure_mapping_format(variables):
    """Ensure variables are in mapping (dict) format.

    Args:
        variables (list/dict): a dict, or a list of single-key dicts.

    Returns:
        dict: merged variables mapping, e.g.
            [{"a": 1}, {"b": 2}] -> {"a": 1, "b": 2}

    Raises:
        exceptions.ParamsError: for any other input type.
    """
    if isinstance(variables, dict):
        return variables
    if isinstance(variables, list):
        merged = {}
        for item in variables:
            merged.update(item)
        return merged
    raise exceptions.ParamsError("variables format error!")
150,362
get testcase input(variables) and output. Args: testcase (unittest.suite.TestSuite): corresponding to one YAML/JSON file, it has been set two attributes: config: parsed config block runner: initialized runner.Runner() with config Returns: dict: input(variables) and output mapping.
def get_testcase_io(testcase):
    """Return the testcase's input variables and extracted output mapping.

    Args:
        testcase (unittest.suite.TestSuite): corresponding to one YAML/JSON
            file; expected to carry two attributes:
            config: parsed config block
            runner: initialized runner.Runner() with that config

    Returns:
        dict: {"in": input variables, "out": extracted output mapping}
    """
    config = testcase.config
    in_variables = config.get("variables", {})
    out_mapping = testcase.runner.extract_output(config.get("output", []))
    return {
        "in": in_variables,
        "out": out_mapping
    }
150,364
dump tests data to json file. the dumped file is located in PWD/logs folder. Args: json_data (list/dict): json data to dump project_mapping (dict): project info tag_name (str): tag name, loaded/parsed/summary
def dump_logs(json_data, project_mapping, tag_name):
    """Dump tests data to a json file under the PWD/logs folder.

    Args:
        json_data (list/dict): json data to dump
        project_mapping (dict): project info used to build the dump path
        tag_name (str): tag name, e.g. loaded/parsed/summary
    """
    # helper computes the logs directory and per-tag file name
    pwd_dir_path, dump_file_name = _prepare_dump_info(project_mapping, tag_name)
    dump_json_file(json_data, pwd_dir_path, dump_file_name)
150,372
load folder path, return all files endswith yml/yaml/json in list. Args: folder_path (str): specified folder path to load recursive (bool): load files recursively if True Returns: list: files endswith yml/yaml/json
def load_folder_files(folder_path, recursive=True):
    """Collect all yml/yaml/json file paths under *folder_path*.

    Args:
        folder_path (str/list/set): folder path(s) to scan
        recursive (bool): descend into sub-folders when True

    Returns:
        list: file paths ending with yml/yaml/json
    """
    if isinstance(folder_path, (list, set)):
        collected = []
        for single_path in set(folder_path):
            collected.extend(load_folder_files(single_path, recursive))
        return collected

    if not os.path.exists(folder_path):
        return []

    file_list = []
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        for filename in filenames:
            if filename.endswith(('.yml', '.yaml', '.json')):
                file_list.append(os.path.join(dirpath, filename))
        if not recursive:
            # only the top-level directory was requested
            break
    return file_list
150,379
load .env file. Args: dot_env_path (str): .env file path Returns: dict: environment variables mapping { "UserName": "debugtalk", "Password": "123456", "PROJECT_KEY": "ABCDEFGH" } Raises: exceptions.FileFormatError: If .env file format is invalid.
def load_dot_env_file(dot_env_path):
    """Load environment variables from a .env file.

    Both ``KEY=value`` and ``KEY: value`` line formats are supported.
    Blank lines and ``#`` comment lines are skipped.

    Args:
        dot_env_path (str): .env file path

    Returns:
        dict: environment variables mapping, e.g.
            {
                "UserName": "debugtalk",
                "Password": "123456",
                "PROJECT_KEY": "ABCDEFGH"
            }

    Raises:
        exceptions.FileFormatError: if a non-empty, non-comment line has
            neither '=' nor ':'.
    """
    if not os.path.isfile(dot_env_path):
        return {}

    logger.log_info("Loading environment variables from {}".format(dot_env_path))
    env_variables_mapping = {}
    with io.open(dot_env_path, 'r', encoding='utf-8') as fp:
        for line in fp:
            line = line.strip()
            # robustness fix: previously a blank or comment line raised
            # FileFormatError; skip such lines instead
            if not line or line.startswith("#"):
                continue
            # maxsplit=1 so values may themselves contain '='/':'
            if "=" in line:
                variable, value = line.split("=", 1)
            elif ":" in line:
                variable, value = line.split(":", 1)
            else:
                raise exceptions.FileFormatError(".env format error")
            env_variables_mapping[variable.strip()] = value.strip()

    utils.set_os_environ(env_variables_mapping)
    return env_variables_mapping
150,380
locate filename and return absolute file path. searching will be recursive upward until current working directory. Args: start_path (str): start locating path, maybe file path or directory path Returns: str: located file path. None if file not found. Raises: exceptions.FileNotFound: If failed to locate file.
def locate_file(start_path, file_name):
    """Locate *file_name* searching recursively upward from *start_path*.

    The search stops at the current working directory or the filesystem root.

    Args:
        start_path (str): start locating path; may be a file or a directory.

    Returns:
        str: absolute path of the located file.

    Raises:
        exceptions.FileNotFound: when *start_path* is invalid or the file
            cannot be located.
    """
    if os.path.isfile(start_path):
        start_dir_path = os.path.dirname(start_path)
    elif os.path.isdir(start_path):
        start_dir_path = start_path
    else:
        raise exceptions.FileNotFound("invalid path: {}".format(start_path))

    candidate = os.path.join(start_dir_path, file_name)
    if os.path.isfile(candidate):
        return os.path.abspath(candidate)

    # stop at the current working directory or the filesystem root
    if os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]:
        raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))

    # otherwise continue one directory up
    return locate_file(os.path.dirname(start_dir_path), file_name)
150,381
load python module functions. Args: module: python module Returns: dict: functions mapping for specified python module { "func1_name": func1, "func2_name": func2 }
def load_module_functions(module):
    """Load the functions defined in a python module.

    Args:
        module: python module

    Returns:
        dict: {function_name: function} for every function in the module.
    """
    return {
        name: item
        for name, item in vars(module).items()
        if validator.is_function(item)
    }
150,382
load api/testcases/testsuites definitions from folder. Args: folder_path (str): api/testcases/testsuites files folder. Returns: dict: api definition mapping. { "tests/api/basic.yml": [ {"api": {"def": "api_login", "request": {}, "validate": []}}, {"api": {"def": "api_logout", "request": {}, "validate": []}} ] }
def load_folder_content(folder_path):
    """Load api/testcases/testsuites definitions from a folder.

    Args:
        folder_path (str): folder containing yml/yaml/json files.

    Returns:
        dict: {file_path: loaded file content}
    """
    return {
        file_path: load_file(file_path)
        for file_path in load_folder_files(folder_path)
    }
150,388
load api, testcases, .env, debugtalk.py functions. api/testcases folder is relative to project_working_directory Args: test_path (str): test file/folder path, locate pwd from this path. dot_env_path (str): specified .env file path Returns: dict: project loaded api/testcases definitions, environments and debugtalk.py functions.
def load_project_tests(test_path, dot_env_path=None):
    """Load api definitions, .env and debugtalk.py functions for a project.

    The api folder is resolved relative to the project working directory
    (PWD). Results are written into the module-level ``project_mapping``
    and ``tests_def_mapping`` dicts (defined elsewhere in this module).

    Args:
        test_path (str): test file/folder path; PWD is located from here.
        dot_env_path (str): specified .env file path.
    """
    # locate debugtalk.py file
    debugtalk_path = locate_debugtalk_py(test_path)
    if debugtalk_path:
        # The folder contains debugtalk.py will be treated as PWD.
        project_working_directory = os.path.dirname(debugtalk_path)
    else:
        # debugtalk.py not found, use os.getcwd() as PWD.
        project_working_directory = os.getcwd()

    # add PWD to sys.path so user modules are importable
    sys.path.insert(0, project_working_directory)

    # load .env file
    # NOTICE:
    # environment variable maybe loaded in debugtalk.py
    # thus .env file should be loaded before loading debugtalk.py
    dot_env_path = dot_env_path or os.path.join(project_working_directory, ".env")
    project_mapping["env"] = load_dot_env_file(dot_env_path)

    if debugtalk_path:
        # load debugtalk.py functions
        debugtalk_functions = load_debugtalk_functions()
    else:
        debugtalk_functions = {}

    # locate PWD and load debugtalk.py functions
    project_mapping["PWD"] = project_working_directory
    built_in.PWD = project_working_directory
    project_mapping["functions"] = debugtalk_functions

    # load api
    tests_def_mapping["api"] = load_api_folder(os.path.join(project_working_directory, "api"))
    tests_def_mapping["PWD"] = project_working_directory
150,390
extract all variable names from content, which is in format $variable Args: content (str): string content Returns: list: variables list extracted from string content Examples: >>> regex_findall_variables("$variable") ["variable"] >>> regex_findall_variables("/blog/$postid") ["postid"] >>> regex_findall_variables("/$var1/$var2") ["var1", "var2"] >>> regex_findall_variables("abc") []
def regex_findall_variables(content):
    """Extract all $variable names from *content*.

    Args:
        content (str): string content, e.g. "/$var1/$var2"

    Returns:
        list: extracted variable names, e.g. ["var1", "var2"];
        [] for non-string input.
    """
    try:
        # each match tuple has the name in exactly one of its two groups
        return [
            var_tuple[0] or var_tuple[1]
            for var_tuple in variable_regex_compile.findall(content)
        ]
    except TypeError:
        # content is not a string
        return []
150,395
get variable from variables_mapping. Args: variable_name (str): variable name variables_mapping (dict): variables mapping Returns: mapping variable value. Raises: exceptions.VariableNotFound: variable is not found.
def get_mapping_variable(variable_name, variables_mapping):
    """Get variable value from variables_mapping.

    Args:
        variable_name (str): variable name
        variables_mapping (dict): variables mapping

    Returns:
        mapped variable value.

    Raises:
        exceptions.VariableNotFound: when the variable is not found.
    """
    try:
        return variables_mapping[variable_name]
    except KeyError:
        raise exceptions.VariableNotFound("{} is not found.".format(variable_name))
150,397
get function from functions_mapping, if not found, then try to check if builtin function. Args: function_name (str): function name functions_mapping (dict): functions mapping Returns: mapping function object. Raises: exceptions.FunctionNotFound: function is neither defined in debugtalk.py nor builtin.
def get_mapping_function(function_name, functions_mapping):
    """Resolve *function_name* to a callable.

    Lookup order: functions_mapping (debugtalk.py functions), special
    aliases (parameterize/P, environ/ENV), HttpRunner builtin functions,
    then Python builtin functions.

    Args:
        function_name (str): function name
        functions_mapping (dict): user-defined functions mapping

    Returns:
        callable: the mapped function object.

    Raises:
        exceptions.FunctionNotFound: when the name resolves to nothing
            callable anywhere.
    """
    if function_name in functions_mapping:
        return functions_mapping[function_name]

    if function_name in ["parameterize", "P"]:
        from httprunner import loader
        return loader.load_csv_file

    if function_name in ["environ", "ENV"]:
        return utils.get_os_environ

    try:
        # check if HttpRunner builtin functions
        from httprunner import loader
        built_in_functions = loader.load_builtin_functions()
        return built_in_functions[function_name]
    except KeyError:
        pass

    try:
        # check if Python builtin functions
        # NOTE: eval() here executes a name taken from user-authored test
        # files; it must never be fed untrusted external input.
        item_func = eval(function_name)
        if callable(item_func):
            # is builtin function
            return item_func
    except (NameError, TypeError):
        # is not builtin function
        pass

    # bug fix: previously a non-callable eval result (e.g. "True") fell
    # through and returned None silently; now every unresolved name raises.
    raise exceptions.FunctionNotFound("{} is not found.".format(function_name))
150,398
extend test with testcase definition test will merge and override testcase config definition. Args: test_dict (dict): test block testcase_def_dict (dict): testcase definition Returns: dict: extended test dict.
def _extend_with_testcase(test_dict, testcase_def_dict):
    """Extend a testsuite test block with its referenced testcase definition.

    The test block's settings merge into (and override) the testcase
    definition's config; the merged definition then replaces *test_dict*
    in place.

    Args:
        test_dict (dict): test block from the testsuite.
        testcase_def_dict (dict): referenced testcase definition.
    """
    # override testcase config variables
    testcase_def_dict["config"].setdefault("variables", {})
    testcase_def_variables = utils.ensure_mapping_format(testcase_def_dict["config"].get("variables", {}))
    testcase_def_variables.update(test_dict.pop("variables", {}))
    testcase_def_dict["config"]["variables"] = testcase_def_variables

    # override base_url, verify
    # priority: testcase config > testsuite tests
    test_base_url = test_dict.pop("base_url", "")
    if not testcase_def_dict["config"].get("base_url"):
        testcase_def_dict["config"]["base_url"] = test_base_url

    # override name: test block name > definition name > fallback
    test_name = test_dict.pop("name", None) \
        or testcase_def_dict["config"].pop("name", None) \
        or "testcase name undefined"

    # override testcase config name, output, etc.
    testcase_def_dict["config"].update(test_dict)
    testcase_def_dict["config"]["name"] = test_name

    # replace the test block with the merged definition, in place
    test_dict.clear()
    test_dict.update(testcase_def_dict)
150,406
parse testcase Args: testcase (dict): { "config": {}, "teststeps": [] }
def _parse_testcase(testcase, project_mapping, session_variables_set=None):
    """Parse one testcase: prepare its config block and each teststep.

    Args:
        testcase (dict): {"config": {}, "teststeps": []}

    Returns:
        dict: parsed testcase with prepared config and teststeps.
    """
    config = testcase.setdefault("config", {})
    prepared_config = __prepare_config(config, project_mapping, session_variables_set)
    prepared_teststeps = __prepare_testcase_tests(
        testcase["teststeps"],
        prepared_config,
        project_mapping,
        session_variables_set
    )
    return {
        "config": prepared_config,
        "teststeps": prepared_teststeps
    }
150,409
init LazyFunction object with function_meta Args: function_meta (dict): function name, args and kwargs. { "func_name": "func", "args": [1, 2] "kwargs": {"a": 3, "b": 4} }
def __init__(self, function_meta, functions_mapping=None, check_variables_set=None):
    """Initialize a LazyFunction from *function_meta*.

    Args:
        function_meta (dict): function name, args and kwargs, e.g.
            {
                "func_name": "func",
                "args": [1, 2],
                "kwargs": {"a": 3, "b": 4}
            }
        functions_mapping (dict): name -> callable mapping for resolution.
        check_variables_set (set): variable names known to be defined.
    """
    self.functions_mapping = functions_mapping or {}
    self.check_variables_set = check_variables_set or set()
    # cache key is computed later, when/if the call result gets cached
    self.cache_key = None
    self.__parse(function_meta)
150,413
init func as lazy functon instance Args: function_meta (dict): function meta including name, args and kwargs
def __parse(self, function_meta):
    """Resolve the callable and lazily prepare its args/kwargs.

    Args:
        function_meta (dict): function meta including name, args and kwargs.

    Raises:
        exceptions.ParamsError: when P()/ENV() receive anything other than
            exactly one positional argument.
    """
    self._func = get_mapping_function(
        function_meta["func_name"],
        self.functions_mapping
    )
    self.func_name = self._func.__name__
    # args/kwargs may themselves contain $vars / ${func()} expressions
    self._args = prepare_lazy_data(
        function_meta.get("args", []),
        self.functions_mapping,
        self.check_variables_set
    )
    self._kwargs = prepare_lazy_data(
        function_meta.get("kwargs", {}),
        self.functions_mapping,
        self.check_variables_set
    )
    if self.func_name == "load_csv_file":
        # P(...) alias: exactly one positional argument allowed
        if len(self._args) != 1 or self._kwargs:
            raise exceptions.ParamsError("P() should only pass in one argument!")
        # rebuild as a single-element list (no-op given the check above)
        self._args = [self._args[0]]
    elif self.func_name == "get_os_environ":
        # ENV(...) alias: exactly one positional argument allowed
        if len(self._args) != 1 or self._kwargs:
            raise exceptions.ParamsError("ENV() should only pass in one argument!")
        self._args = [self._args[0]]
150,414
parse raw string, replace function and variable with {} Args: raw_string(str): string with functions or varialbes e.g. "ABC${func2($a, $b)}DE$c" Returns: string: "ABC{}DE{}" args: ["${func2($a, $b)}", "$c"]
def __parse(self, raw_string): self._args = [] def escape_braces(origin_string): return origin_string.replace("{", "{{").replace("}", "}}") try: match_start_position = raw_string.index("$", 0) begin_string = raw_string[0:match_start_position] self._string = escape_braces(begin_string) except ValueError: self._string = escape_braces(raw_string) return while match_start_position < len(raw_string): # Notice: notation priority # $$ > ${func($a, $b)} > $var # search $$ dollar_match = dolloar_regex_compile.match(raw_string, match_start_position) if dollar_match: match_start_position = dollar_match.end() self._string += "$" continue # search function like ${func($a, $b)} func_match = function_regex_compile.match(raw_string, match_start_position) if func_match: function_meta = parse_function_params(func_match.group(1)) function_meta = { "func_name": func_match.group(1) } function_meta.update(parse_function_params(func_match.group(2))) lazy_func = LazyFunction( function_meta, self.functions_mapping, self.check_variables_set ) self._args.append(lazy_func) match_start_position = func_match.end() self._string += "{}" continue # search variable like ${var} or $var var_match = variable_regex_compile.match(raw_string, match_start_position) if var_match: var_name = var_match.group(1) or var_match.group(2) # check if any variable undefined in check_variables_set if var_name not in self.check_variables_set: raise exceptions.VariableNotFound(var_name) self._args.append(var_name) match_start_position = var_match.end() self._string += "{}" continue curr_position = match_start_position try: # find next $ location match_start_position = raw_string.index("$", curr_position+1) remain_string = raw_string[curr_position:match_start_position] except ValueError: remain_string = raw_string[curr_position:] # break while loop match_start_position = len(raw_string) self._string += escape_braces(remain_string)
150,419
check if path is testcase path or path list. Args: path (str/list): file path or file path list. Returns: bool: True if path is valid file path or path list, otherwise False.
def is_testcase_path(path): if not isinstance(path, (str, list)): return False if isinstance(path, list): for p in path: if not is_testcase_path(p): return False if isinstance(path, str): if not os.path.exists(path): return False return True
150,428
init test variables, called when each test(api) starts. variables_mapping will be evaluated first. Args: variables_mapping (dict) { "random": "${gen_random_string(5)}", "authorization": "${gen_md5($TOKEN, $data, $random)}", "data": '{"name": "user", "password": "123456"}', "TOKEN": "debugtalk", }
def init_test_variables(self, variables_mapping=None): variables_mapping = variables_mapping or {} variables_mapping = utils.ensure_mapping_format(variables_mapping) variables_mapping.update(self.session_variables_mapping) parsed_variables_mapping = parser.parse_variables_mapping(variables_mapping) self.test_variables_mapping = {} # priority: extracted variable > teststep variable self.test_variables_mapping.update(parsed_variables_mapping) self.test_variables_mapping.update(self.session_variables_mapping)
150,446
get summary from test result Args: result (instance): HtmlTestResult() instance Returns: dict: summary extracted from result. { "success": True, "stat": {}, "time": {}, "records": [] }
def get_summary(result): summary = { "success": result.wasSuccessful(), "stat": { 'total': result.testsRun, 'failures': len(result.failures), 'errors': len(result.errors), 'skipped': len(result.skipped), 'expectedFailures': len(result.expectedFailures), 'unexpectedSuccesses': len(result.unexpectedSuccesses) } } summary["stat"]["successes"] = summary["stat"]["total"] \ - summary["stat"]["failures"] \ - summary["stat"]["errors"] \ - summary["stat"]["skipped"] \ - summary["stat"]["expectedFailures"] \ - summary["stat"]["unexpectedSuccesses"] summary["time"] = { 'start_at': result.start_at, 'duration': result.duration } summary["records"] = result.records return summary
150,450
aggregate new_stat to origin_stat. Args: origin_stat (dict): origin stat dict, will be updated with new_stat dict. new_stat (dict): new stat dict.
def aggregate_stat(origin_stat, new_stat): for key in new_stat: if key not in origin_stat: origin_stat[key] = new_stat[key] elif key == "start_at": # start datetime origin_stat[key] = min(origin_stat[key], new_stat[key]) else: origin_stat[key] += new_stat[key]
150,451
expand meta_datas to one level Args: meta_datas (dict/list): maybe in nested format Returns: list: expanded list in one level Examples: >>> meta_datas = [ [ dict1, dict2 ], dict3 ] >>> meta_datas_expanded = [] >>> __expand_meta_datas(meta_datas, meta_datas_expanded) >>> print(meta_datas_expanded) [dict1, dict2, dict3]
def __expand_meta_datas(meta_datas, meta_datas_expanded): if isinstance(meta_datas, dict): meta_datas_expanded.append(meta_datas) elif isinstance(meta_datas, list): for meta_data in meta_datas: __expand_meta_datas(meta_data, meta_datas_expanded)
150,455
def render_html_report(summary, report_template=None, report_dir=None):
    """Render html report with specified report name and template.

    Args:
        summary (dict): test summary data fed to the template.
        report_template (str): specify html report template path.
        report_dir (str): specify html report save directory.

    Returns:
        str: path of the generated html report file.
    """
    if report_template:
        logger.log_info("render with html report template: {}".format(report_template))
    else:
        # fall back to the template shipped alongside this module
        report_template = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            "templates",
            "report_template.html"
        )
        logger.log_debug("No html report template specified, use default.")

    logger.log_info("Start to render Html report ...")

    report_dir = report_dir or os.path.join(os.getcwd(), "reports")
    if not os.path.isdir(report_dir):
        os.makedirs(report_dir)

    # report file is named after the run's start timestamp
    start_at_timestamp = int(summary["time"]["start_at"])
    summary["time"]["start_datetime"] = datetime.fromtimestamp(
        start_at_timestamp
    ).strftime('%Y-%m-%d %H:%M:%S')
    report_path = os.path.join(report_dir, "{}.html".format(start_at_timestamp))

    with io.open(report_template, "r", encoding='utf-8') as fp_r:
        template_content = fp_r.read()

    with io.open(report_path, 'w', encoding='utf-8') as fp_w:
        rendered_content = Template(
            template_content,
            extensions=["jinja2.ext.loopcontrols"]
        ).render(summary)
        fp_w.write(rendered_content)

    logger.log_info("Generated Html report: {}".format(report_path))
    return report_path
150,458
def _extract_field_with_delimiter(self, field):
    """ response content could be json or html text.

    Args:
        field (str): string joined by delimiter.
        e.g.
            "status_code"
            "headers"
            "cookies"
            "content"
            "headers.content-type"
            "content.person.name.first_name"

    Returns:
        extracted value from the corresponding response attribute.

    Raises:
        exceptions.ParamsError: if field format is invalid.
        exceptions.ExtractFailure: if the queried key/attribute is missing.
        exceptions.TeardownHooksFailure: if a teardown-hook-set attribute
            cannot be queried with sub_query.
    """
    # string.split(sep=None, maxsplit=-1) -> list of strings
    # e.g. "content.person.name" => ["content", "person.name"]
    try:
        top_query, sub_query = field.split('.', 1)
    except ValueError:
        # no delimiter: the whole field is the top-level attribute name
        top_query = field
        sub_query = None

    # scalar response attributes: no sub query allowed
    if top_query in ["status_code", "encoding", "ok", "reason", "url"]:
        if sub_query:
            # status_code.XX
            err_msg = u"Failed to extract: {}\n".format(field)
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)

        return getattr(self, top_query)

    # cookies
    elif top_query == "cookies":
        cookies = self.cookies
        if not sub_query:
            # extract cookies
            return cookies

        try:
            return cookies[sub_query]
        except KeyError:
            err_msg = u"Failed to extract cookie! => {}\n".format(field)
            err_msg += u"response cookies: {}\n".format(cookies)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # elapsed: datetime.timedelta, a sub attribute is mandatory
    elif top_query == "elapsed":
        available_attributes = u"available attributes: days, seconds, microseconds, total_seconds"
        if not sub_query:
            err_msg = u"elapsed is datetime.timedelta instance, attribute should also be specified!\n"
            err_msg += available_attributes
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)
        elif sub_query in ["days", "seconds", "microseconds"]:
            return getattr(self.elapsed, sub_query)
        elif sub_query == "total_seconds":
            # total_seconds is a method, not an attribute
            return self.elapsed.total_seconds()
        else:
            err_msg = "{} is not valid datetime.timedelta attribute.\n".format(sub_query)
            err_msg += available_attributes
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)

    # headers
    elif top_query == "headers":
        headers = self.headers
        if not sub_query:
            # extract headers
            return headers

        try:
            return headers[sub_query]
        except KeyError:
            err_msg = u"Failed to extract header! => {}\n".format(field)
            err_msg += u"response headers: {}\n".format(headers)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # response body: json if parseable, else raw text
    elif top_query in ["content", "text", "json"]:
        try:
            body = self.json
        except exceptions.JSONDecodeError:
            body = self.text

        if not sub_query:
            # extract response body
            return body

        if isinstance(body, (dict, list)):
            # content = {"xxx": 123}, content.xxx
            return utils.query_json(body, sub_query)
        elif sub_query.isdigit():
            # content = "abcdefg", content.3 => d
            return utils.query_json(body, sub_query)
        else:
            # content = "<html>abcdefg</html>", content.xxx
            err_msg = u"Failed to extract attribute from response body! => {}\n".format(field)
            err_msg += u"response body: {}\n".format(body)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # new set response attributes in teardown_hooks
    elif top_query in self.__dict__:
        attributes = self.__dict__[top_query]

        if not sub_query:
            # extract response attributes
            return attributes

        if isinstance(attributes, (dict, list)):
            # attributes = {"xxx": 123}, content.xxx
            return utils.query_json(attributes, sub_query)
        elif sub_query.isdigit():
            # attributes = "abcdefg", attributes.3 => d
            return utils.query_json(attributes, sub_query)
        else:
            # content = "attributes.new_attribute_not_exist"
            err_msg = u"Failed to extract cumstom set attribute from teardown hooks! => {}\n".format(field)
            err_msg += u"response set attributes: {}\n".format(attributes)
            logger.log_error(err_msg)
            raise exceptions.TeardownHooksFailure(err_msg)

    # others
    else:
        err_msg = u"Failed to extract attribute from response! => {}\n".format(field)
        err_msg += u"available response attributes: status_code, cookies, elapsed, headers, content, text, json, encoding, ok, reason, url.\n\n"
        err_msg += u"If you want to set attribute in teardown_hooks, take the following example as reference:\n"
        err_msg += u"response.new_attribute = 'new_attribute_value'\n"
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)
150,468
def extract_response(self, extractors):
    """Extract value from requests.Response and store in OrderedDict.

    Args:
        extractors (list): e.g.
            [
                {"resp_status_code": "status_code"},
                {"resp_headers_content_type": "headers.content-type"},
                {"resp_content": "content"},
                {"resp_content_person_first_name": "content.person.name.first_name"}
            ]

    Returns:
        OrderDict: variable binds ordered dict
    """
    if not extractors:
        return {}

    logger.log_debug("start to extract from response object.")

    extracted_variables_mapping = OrderedDict()
    for key, field in utils.ensure_mapping_format(extractors).items():
        extracted_variables_mapping[key] = self.extract_field(field)

    return extracted_variables_mapping
150,470
def set_user_profile_photo(
    self,
    photo: str
) -> bool:
    """Use this method to set a new profile photo.

    This method only works for Users.
    Bots profile photos must be set using BotFather.

    Args:
        photo (``str``):
            Profile photo to set.
            Pass a file path as string to upload a new photo that exists on your local machine.

    Returns:
        True on success.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # upload the local file first, then attach it to the profile photo request
    uploaded_file = self.save_file(photo)
    result = self.send(
        functions.photos.UploadProfilePhoto(
            file=uploaded_file
        )
    )
    return bool(result)
150,488
def on_user_status(
    self=None,
    filters=None,
    group: int = 0
) -> callable:
    """Use this decorator to automatically register a function for handling
    user status updates. This does the same thing as :meth:`add_handler` using
    the :class:`UserStatusHandler`.

    Args:
        filters (:obj:`Filters <pyrogram.Filters>`):
            Pass one or more filters to allow only a subset of UserStatus
            updated to be passed in your function.

        group (``int``, *optional*):
            The group identifier, defaults to 0.
    """
    def decorator(func: callable) -> Tuple[Handler, int]:
        # func may be a previously-decorated (handler, group) tuple when
        # decorators are stacked; unwrap to the raw callback.
        if isinstance(func, tuple):
            func = func[0].callback

        handler = pyrogram.UserStatusHandler(func, filters)

        # Static usage (no client instance): the decorator was called as
        # Client.on_user_status(filter[, group]), so the first positional
        # argument (bound to `self`) is actually the filter and `filters`
        # (if given) carries the group number.
        if isinstance(self, Filter):
            return pyrogram.UserStatusHandler(func, self), group if filters is None else filters

        # Instance usage: register the handler on the client right away.
        if self is not None:
            self.add_handler(handler, group)

        return handler, group

    return decorator
150,525
def get_chat_members_count(
    self,
    chat_id: Union[int, str]
) -> int:
    """Use this method to get the number of members in a chat.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.

    Returns:
        On success, an integer is returned.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``ValueError`` if a chat_id belongs to user.
    """
    peer = self.resolve_peer(chat_id)

    # basic groups and channels/supergroups need different RPC calls
    if isinstance(peer, types.InputPeerChat):
        result = self.send(
            functions.messages.GetChats(
                id=[peer.chat_id]
            )
        )
        return result.chats[0].participants_count

    if isinstance(peer, types.InputPeerChannel):
        result = self.send(
            functions.channels.GetFullChannel(
                channel=peer
            )
        )
        return result.full_chat.participants_count

    raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id))
150,531
def idle(self, stop_signals: tuple = (SIGINT, SIGTERM, SIGABRT)):
    """Blocks the program execution until one of the signals are received,
    then gently stop the Client by closing the underlying connection.

    Args:
        stop_signals (``tuple``, *optional*):
            Iterable containing signals the signal handler will listen to.
            Defaults to (SIGINT, SIGTERM, SIGABRT).
    """
    def signal_handler(*args):
        # flip the flag; the polling loop below notices and exits
        self.is_idle = False

    for sig in stop_signals:
        signal(sig, signal_handler)

    self.is_idle = True

    # poll once a second until a registered signal clears the flag
    while self.is_idle:
        time.sleep(1)

    self.stop()
150,537
def remove_handler(self, handler: Handler, group: int = 0):
    """Removes a previously-added update handler.

    Make sure to provide the right group that the handler was added in.
    You can use the return value of the :meth:`add_handler` method, a tuple
    of (handler, group), and pass it directly.

    Args:
        handler (``Handler``):
            The handler to be removed.

        group (``int``, *optional*):
            The group identifier, defaults to 0.
    """
    # disconnect handlers live outside the dispatcher's groups
    if not isinstance(handler, DisconnectHandler):
        self.dispatcher.remove_handler(handler, group)
    else:
        self.disconnect_handler = None
150,539
def on_raw_update(
    self=None,
    group: int = 0
) -> callable:
    """Use this decorator to automatically register a function for handling
    raw updates. This does the same thing as :meth:`add_handler` using the
    :class:`RawUpdateHandler`.

    Args:
        group (``int``, *optional*):
            The group identifier, defaults to 0.
    """
    def decorator(func: callable) -> Tuple[Handler, int]:
        # func may be a previously-decorated (handler, group) tuple when
        # decorators are stacked; unwrap to the raw callback.
        if isinstance(func, tuple):
            func = func[0].callback

        handler = pyrogram.RawUpdateHandler(func)

        # Static usage (no client instance): the decorator was called as
        # Client.on_raw_update(group), so the positional argument bound to
        # `self` is actually the group number and must be returned as such.
        # BUG FIX: the previous code returned `group` (the default, 0) in
        # this branch, silently discarding the caller-supplied group.
        if isinstance(self, int):
            return handler, group if self is None else self

        # Instance usage: register the handler on the client right away.
        if self is not None:
            self.add_handler(handler, group)

        return handler, group

    return decorator
150,616