Dataset schema (one record per function):

  code        string, lengths 26–870k
  docstring   string, lengths 1–65.6k
  func_name   string, lengths 1–194
  language    string, 1 distinct value
  repo        string, lengths 8–68
  path        string, lengths 5–194
  url         string, lengths 46–254
  license     string, 4 distinct values
import re


def separate_citations(text: str) -> str:
    """
    Separates multiple citations within square brackets into individual citations.

    Args:
        text (str): The input string containing citations.

    Returns:
        str: The string with separated citations.
    """

    # Define a function to process each match
    def replace_citations(match):
        citations = match.group(1).split(",")
        return "".join(f"[{citation.strip()}]" for citation in citations)

    # Use regular expressions to find and replace citations
    pattern = re.compile(r"\[(\d+(?:,\s*\d+)*)\]")
    return pattern.sub(replace_citations, text)
Separates multiple citations within square brackets into individual citations. Args: text (str): The input string containing citations. Returns: str: The string with separated citations.
separate_citations
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
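A minimal usage sketch for separate_citations (the sample sentence is made up for illustration):

# Comma-separated citation groups are split into individual brackets;
# citations already in their own brackets are left unchanged.
text = "Transformers dominate NLP [1, 2, 5] and vision [3]."
print(separate_citations(text))
# -> "Transformers dominate NLP [1][2][5] and vision [3]."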
import re
from typing import List, Tuple


def extract_and_remove_citations(text: str) -> Tuple[str, List[int]]:
    """
    Removes single inline citations from the input string and returns the modified string
    and a list of citation integers.

    Args:
        text (str): The input string containing citations.

    Returns:
        Tuple[str, List[int]]: The string after removal of citations and a list of citation integers.
    """
    citations = []

    # Define a function to process each match
    def extract_citation(match):
        citation = int(match.group(1))
        citations.append(citation)
        return ""

    # Use regular expressions to find and replace citations
    pattern = re.compile(r"\[(\d+)\]")
    modified_text = pattern.sub(extract_citation, text)

    return modified_text, citations
Removes single inline citations from the input string and returns the modified string and a list of citation integers. Args: text (str): The input string containing citations. Returns: Tuple[str, List[int]]: The string after removal of citations and a list of citation integers.
extract_and_remove_citations
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
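A usage sketch for extract_and_remove_citations (illustrative input):

text = "Attention is all you need [1][2]."
modified, cites = extract_and_remove_citations(text)
print(modified)  # -> "Attention is all you need ."
print(cites)     # -> [1, 2]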
def keep_first_and_last_paragraph(text: str) -> str:
    """
    Processes the input text to keep the first and last paragraphs and replace the middle
    paragraphs with '[content omitted due to space limit]'.

    Args:
        text (str): The input text containing paragraphs separated by '\n\n'.

    Returns:
        str: The processed text.
    """
    paragraphs = text.split("\n\n")

    if len(paragraphs) <= 3:
        return text

    first_paragraph = paragraphs[0]
    last_paragraph = "\n\n".join(paragraphs[-2:])
    return (
        f"{first_paragraph}\n\n[content omitted due to space limit]\n\n{last_paragraph}"
    )
Processes the input text to keep the first and last paragraphs and replace the middle paragraphs with '[content omitted due to space limit]'. Args: text (str): The input text containing paragraphs separated by '\n\n'. Returns: str: The processed text.
keep_first_and_last_paragraph
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
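A usage sketch (made-up paragraphs); note that as written the function keeps the first paragraph and the last two paragraphs, since it joins paragraphs[-2:]:

text = "\n\n".join(f"Paragraph {i}" for i in range(1, 6))
print(keep_first_and_last_paragraph(text))
# Paragraph 1
#
# [content omitted due to space limit]
#
# Paragraph 4
#
# Paragraph 5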
def clean_up_section(text):
    """Clean up a section:
    1. Remove uncompleted sentences (usually due to output token limitation).
    2. Deduplicate individual groups of citations.
    3. Remove unnecessary summary."""
    paragraphs = text.split("\n")
    output_paragraphs = []
    summary_sec_flag = False
    for p in paragraphs:
        p = p.strip()
        if len(p) == 0:
            continue
        if not p.startswith("#"):
            p = separate_citations(p)
        if summary_sec_flag:
            if p.startswith("#"):
                summary_sec_flag = False
            else:
                continue
        if (
            p.startswith("Overall")
            or p.startswith("In summary")
            or p.startswith("In conclusion")
        ):
            continue
        if "# Summary" in p or "# Conclusion" in p:
            summary_sec_flag = True
            continue
        output_paragraphs.append(p)

    return "\n\n".join(output_paragraphs)  # Join with '\n\n' for markdown format.
Clean up a section: 1. Remove uncompleted sentences (usually due to output token limitation). 2. Deduplicate individual groups of citations. 3. Remove unnecessary summary.
clean_up_section
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
MIT
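A sketch of clean_up_section on a small markdown section (content is made up); it depends on separate_citations from the first record:

section = "# Findings\nLLMs hallucinate [1, 2]\nIn summary, be careful.\n# Conclusion\nUse citations."
print(clean_up_section(section))
# -> "# Findings\n\nLLMs hallucinate [1][2]"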
def forward(
    self,
    topic: str,
    question: str,
    mode: str = "brief",
    style: str = "conversational",
    callback_handler: BaseCallbackHandler = None,
):
    """
    Processes a topic and question to generate a response with relevant information and citations.

    Args:
        topic (str): The topic of interest.
        question (str): The specific question related to the topic.
        mode (str, optional): Mode of summarization. 'brief' takes only the first snippet of
            each Information. 'extensive' adds snippets iteratively until the word limit is
            reached. Defaults to 'brief'.

    Returns:
        dspy.Prediction: An object containing the following:
            - question (str): the question to answer
            - queries (List[str]): List of query strings used for information retrieval.
            - raw_retrieved_info (List[Information]): List of Information instances retrieved.
            - cited_info (Dict[int, Information]): Dictionary of cited Information instances,
              indexed by their citation number.
            - response (str): The generated response string with inline citations.
    """
    # retrieve information
    if callback_handler is not None:
        callback_handler.on_expert_information_collection_start()
    queries, searched_results = self.retrieve_information(
        topic=topic, question=question
    )
    if callback_handler is not None:
        callback_handler.on_expert_information_collection_end(searched_results)
    # format information string for answer generation
    info_text, index_to_information_mapping = format_search_results(
        searched_results, mode=mode
    )
    answer = "Sorry, there is insufficient information to answer the question."
    # generate answer to the question
    if info_text:
        with self.logging_wrapper.log_event(
            f"AnswerQuestionModule.answer_question ({hash(question)})"
        ):
            with dspy.settings.context(
                lm=self.question_answering_lm, show_guidelines=False
            ):
                answer = self.answer_question(
                    topic=topic, question=question, info=info_text, style=style
                ).answer
                answer = ArticleTextProcessing.remove_uncompleted_sentences_with_citations(
                    answer
                )
                answer = trim_output_after_hint(
                    answer,
                    hint="Now give your response. (Try to use as many different sources as possible and do not hallucinate.)",
                )
                # enforce single citation index bracket. [1, 2] -> [1][2]
                answer = separate_citations(answer)
                if callback_handler is not None:
                    callback_handler.on_expert_utterance_generation_end()
    # construct cited search result
    cited_searched_results = extract_cited_storm_info(
        response=answer, index_to_storm_info=index_to_information_mapping
    )
    return dspy.Prediction(
        question=question,
        queries=queries,
        raw_retrieved_info=searched_results,
        cited_info=cited_searched_results,
        response=answer,
    )
Processes a topic and question to generate a response with relevant information and citations. Args: topic (str): The topic of interest. question (str): The specific question related to the topic. mode (str, optional): Mode of summarization. 'brief' takes only the first snippet of each Information. 'extensive' adds snippets iteratively until the word limit is reached. Defaults to 'brief'. Returns: dspy.Prediction: An object containing the following: - question (str): the question to answer - queries (List[str]): List of query strings used for information retrieval. - raw_retrieved_info (List[Information]): List of Information instances retrieved. - cited_info (Dict[int, Information]): Dictionary of cited Information instances, indexed by their citation number. - response (str): The generated response string with inline citations.
forward
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/grounded_question_answering.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/grounded_question_answering.py
MIT
def on_turn_policy_planning_start(self, **kwargs):
    """Run when the turn policy planning begins, before deciding the direction or goal for the next conversation turn."""
    pass
Run when the turn policy planning begins, before deciding the direction or goal for the next conversation turn.
on_turn_policy_planning_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_action_planning_start(self, **kwargs):
    """Run when the expert action planning begins, preparing to determine the actions that each expert should take."""
    pass
Run when the expert action planning begins, preparing to determine the actions that each expert should take.
on_expert_action_planning_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_action_planning_end(self, **kwargs):
    """Run when the expert action planning ends, after deciding the actions that each expert should take."""
    pass
Run when the expert action planning ends, after deciding the actions that each expert should take.
on_expert_action_planning_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_information_collection_start(self, **kwargs):
    """Run when the expert information collection starts, to begin gathering all necessary data from selected sources."""
    pass
Run when the expert information collection starts, to begin gathering all necessary data from selected sources.
on_expert_information_collection_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_information_collection_end(self, info: List[Information], **kwargs):
    """Run when the expert information collection ends, after gathering all necessary data from selected sources."""
    pass
Run when the expert information collection ends, after gathering all necessary data from selected sources.
on_expert_information_collection_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_utterance_generation_end(self, **kwargs):
    """Run when the expert utterance generation ends, after creating responses or statements from each expert."""
    pass
Run when the expert utterance generation ends, after creating responses or statements from each expert.
on_expert_utterance_generation_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_utterance_polishing_start(self, **kwargs):
    """Run when the expert utterance polishing begins, to refine and improve the clarity and coherence of generated content."""
    pass
Run when the expert utterance polishing begins, to refine and improve the clarity and coherence of generated content.
on_expert_utterance_polishing_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_mindmap_insert_start(self, **kwargs):
    """Run when the process of inserting new information into the mindmap starts."""
    pass
Run when the process of inserting new information into the mindmap starts.
on_mindmap_insert_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_mindmap_insert_end(self, **kwargs):
    """Run when the process of inserting new information into the mindmap ends."""
    pass
Run when the process of inserting new information into the mindmap ends.
on_mindmap_insert_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_mindmap_reorg_start(self, **kwargs):
    """Run when the reorganization of the mindmap begins, to restructure and optimize the flow of information."""
    pass
Run when the reorganization of the mindmap begins, to restructure and optimize the flow of information.
on_mindmap_reorg_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_list_update_start(self, **kwargs):
    """Run when the expert list update starts, to modify or refresh the list of active experts."""
    pass
Run when the expert list update starts, to modify or refresh the list of active experts.
on_expert_list_update_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_article_generation_start(self, **kwargs):
    """Run when the article generation process begins, to compile and format the final article content."""
    pass
Run when the article generation process begins, to compile and format the final article content.
on_article_generation_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_warmstart_update(self, message, **kwargs):
    """Run when the warm start process has an update."""
    pass
Run when the warm start process has an update.
on_warmstart_update
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_turn_policy_planning_start(self, **kwargs):
    """Run when the turn policy planning begins, before deciding the direction or goal for the next conversation turn."""
    print("Start planning next expert; inspect mind map; inspect system state.")
Run when the turn policy planning begins, before deciding the direction or goal for the next conversation turn.
on_turn_policy_planning_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_action_planning_start(self, **kwargs):
    """Run when the expert action planning begins, preparing to determine the actions that each expert should take."""
    print("Reviewing discourse history; Deciding utterance intent.")
Run when the expert action planning begins, preparing to determine the actions that each expert should take.
on_expert_action_planning_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_information_collection_start(self, **kwargs):
    """Run when the expert information collection starts, to begin gathering all necessary data from selected sources."""
    print("Start searching with the search engine; browsing collected information.")
Run when the expert information collection starts, to begin gathering all necessary data from selected sources.
on_expert_information_collection_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_information_collection_end(self, info: List[Information], **kwargs):
    """Run when the expert information collection ends, after gathering all necessary data from selected sources."""
    if info:
        urls = [i.url for i in info]
        information_string = "\n".join([f"Finish browsing {url}" for url in urls])
        print(information_string)
Run when the expert information collection ends, after gathering all necessary data from selected sources.
on_expert_information_collection_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_utterance_generation_end(self, **kwargs):
    """Run when the expert utterance generation ends, after creating responses or statements from each expert."""
    print("Finish generating utterance from collected information.")
Run when the expert utterance generation ends, after creating responses or statements from each expert.
on_expert_utterance_generation_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_utterance_polishing_start(self, **kwargs):
    """Run when the expert utterance polishing begins, to refine and improve the clarity and coherence of generated content."""
    print("Start polishing utterance.")
Run when the expert utterance polishing begins, to refine and improve the clarity and coherence of generated content.
on_expert_utterance_polishing_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_mindmap_insert_start(self, **kwargs):
    """Run when the process of inserting new information into the mindmap starts."""
    print("Start inserting information into mind map.")
Run when the process of inserting new information into the mindmap starts.
on_mindmap_insert_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_mindmap_insert_end(self, **kwargs):
    """Run when the process of inserting new information into the mindmap ends."""
    print("Finish inserting information into mind map.")
Run when the process of inserting new information into the mindmap ends.
on_mindmap_insert_end
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_mindmap_reorg_start(self, **kwargs):
    """Run when the reorganization of the mindmap begins, to restructure and optimize the flow of information."""
    print("Start re-organizing mind map.")
Run when the reorganization of the mindmap begins, to restructure and optimize the flow of information.
on_mindmap_reorg_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_expert_list_update_start(self, **kwargs):
    """Run when the expert list update starts, to modify or refresh the list of active experts."""
    print("Start updating expert candidates.")
Run when the expert list update starts, to modify or refresh the list of active experts.
on_expert_list_update_start
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
def on_warmstart_update(self, message, **kwargs):
    """Run when the warm start process has an update."""
    print(f"Warm start update: {message}")
Run when the warm start process has an update.
on_warmstart_update
python
stanford-oval/storm
knowledge_storm/collaborative_storm/modules/callback.py
https://github.com/stanford-oval/storm/blob/master/knowledge_storm/collaborative_storm/modules/callback.py
MIT
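Taken together, these hooks form an observer interface; a minimal custom handler might look like the sketch below. The import path is inferred from the path field of the records above, and the logging setup is illustrative:

import logging

# assumed import path, per the path field of the callback records above
from knowledge_storm.collaborative_storm.modules.callback import BaseCallbackHandler

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("costorm")

class LoggingCallbackHandler(BaseCallbackHandler):
    """Routes pipeline progress to the logging module instead of print()."""

    def on_turn_policy_planning_start(self, **kwargs):
        logger.info("Planning next conversation turn...")

    def on_expert_information_collection_end(self, info, **kwargs):
        logger.info("Collected %d pieces of information.", len(info or []))

    def on_warmstart_update(self, message, **kwargs):
        logger.info("Warm start update: %s", message)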
def parse_input(input_parameter):
    """From a syntax like package_name#submodule, build a package name
    and complete module name."""
    split_package_name = input_parameter.split('#')
    package_name = split_package_name[0]
    module_name = package_name.replace("-", ".")
    if len(split_package_name) >= 2:
        module_name = ".".join([module_name, split_package_name[1]])
    return package_name, module_name
From a syntax like package_name#submodule, build a package name and complete module name.
parse_input
python
Azure/azure-cli
build_scripts/windows/scripts/patch_models_v2.py
https://github.com/Azure/azure-cli/blob/master/build_scripts/windows/scripts/patch_models_v2.py
MIT
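A usage sketch for parse_input (the package name is illustrative):

print(parse_input("azure-mgmt-storage"))
# -> ('azure-mgmt-storage', 'azure.mgmt.storage')
print(parse_input("azure-mgmt-storage#operations"))
# -> ('azure-mgmt-storage', 'azure.mgmt.storage.operations')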
def solve_one_model(models_module, output_folder):
    """Will build the compacted models in the output_folder"""
    models_classes = [
        (len(model_class.__mro__), model_name, inspect.getfile(model_class), model_class)
        for model_name, model_class in vars(models_module).items()
        if model_name[0].isupper() and Model in model_class.__mro__
    ]
    # Sort on MRO size first, and then alphabetically
    models_classes.sort(key=lambda x: (x[0], x[1]))
    # Just need the name of exceptions
    exceptions_classes = [
        model_name
        for model_name, model_class in vars(models_module).items()
        if model_name[0].isupper() and HttpOperationError in model_class.__mro__
    ]
    py2_models_classes = [
        (len_mro, model_name, path.replace("_py3.py", ".py"), None)
        for len_mro, model_name, path, _ in models_classes
    ]
    paged_models_classes = [
        (model_name, inspect.getfile(model_class), model_class)
        for model_name, model_class in vars(models_module).items()
        if model_name[0].isupper() and Paged in model_class.__mro__
    ]
    enum_models_classes = [
        (model_name, inspect.getfile(model_class), model_class)
        for model_name, model_class in vars(models_module).items()
        if model_name[0].isupper() and Enum in model_class.__mro__
    ]
    if enum_models_classes:
        # Can't be more than one enum file
        enum_file = Path(enum_models_classes[0][1])
        enum_file_module_name = "_" + enum_file.with_suffix('').name
        shutil.copyfile(enum_file, Path(output_folder, as_file_name(enum_file_module_name)))
    else:
        enum_file_module_name = None

    write_model_file(Path(output_folder, as_file_name(MODEL_PY3_NAME)), models_classes)
    write_model_file(Path(output_folder, as_file_name(MODEL_PY2_NAME)), py2_models_classes)
    write_paging_file(Path(output_folder, as_file_name(PAGED_NAME)), paged_models_classes)
    write_complete_init(
        Path(output_folder, "__init__.py"),
        models_classes,
        exceptions_classes,
        paged_models_classes,
        enum_models_classes,
        enum_file_module_name
    )
Will build the compacted models in the output_folder
solve_one_model
python
Azure/azure-cli
build_scripts/windows/scripts/patch_models_v2.py
https://github.com/Azure/azure-cli/blob/master/build_scripts/windows/scripts/patch_models_v2.py
MIT
def find_models_to_change(module_name):
    """Will figure out if the package is a multi-api one, and understand what to generate."""
    main_module = importlib.import_module(module_name)
    try:
        models_module = main_module.models
        models_module.__path__
        # It didn't fail, that's a single API package
        return [models_module]
    except AttributeError:
        # This means I loaded the fake module "models"
        # and it's multi-api, load all models
        return [
            importlib.import_module('.' + label + '.models', main_module.__name__)
            for (_, label, ispkg) in pkgutil.iter_modules(main_module.__path__)
            if ispkg and label != 'aio'
        ]
Will figure out if the package is a multi-api one, and understand what to generate.
find_models_to_change
python
Azure/azure-cli
build_scripts/windows/scripts/patch_models_v2.py
https://github.com/Azure/azure-cli/blob/master/build_scripts/windows/scripts/patch_models_v2.py
MIT
def find_autorest_generated_folder(module_prefix="azure.mgmt"):
    """Find all Autorest generated code in that module prefix.
    This actually looks for a "models" package only. We could be smarter if necessary.
    """
    _LOGGER.info(f"Looking for Autorest generated package in {module_prefix}")
    result = []
    try:
        _LOGGER.debug(f"Try {module_prefix}")
        importlib.import_module(".models", module_prefix)
        # If not exception, we found it
        _LOGGER.info(f"Found {module_prefix}")
        result.append(module_prefix)
    except ModuleNotFoundError:
        # No model, might dig deeper
        prefix_module = importlib.import_module(module_prefix)
        for _, sub_package, ispkg in pkgutil.iter_modules(prefix_module.__path__, module_prefix + "."):
            if ispkg:
                result += find_autorest_generated_folder(sub_package)
    return result
Find all Autorest generated code in that module prefix. This actually looks for a "models" package only. We could be smarter if necessary.
find_autorest_generated_folder
python
Azure/azure-cli
build_scripts/windows/scripts/patch_models_v2.py
https://github.com/Azure/azure-cli/blob/master/build_scripts/windows/scripts/patch_models_v2.py
MIT
import os
import subprocess
import sys


def exec_command(command, cwd=None, stdout=None, env=None):
    """Returns True if the command was executed successfully"""
    try:
        command_list = command if isinstance(command, list) else command.split()
        env_vars = os.environ.copy()
        if env:
            env_vars.update(env)
        subprocess.check_call(command_list, stdout=stdout, cwd=cwd, env=env_vars)
        return True
    except subprocess.CalledProcessError as err:
        print(err, file=sys.stderr)
        return False
Returns True if the command was executed successfully
exec_command
python
Azure/azure-cli
tools/automation/tests/verify_readme_history.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/tests/verify_readme_history.py
MIT
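A usage sketch for exec_command (the command and environment variable are illustrative):

ok = exec_command('python --version', env={'PYTHONIOENCODING': 'utf-8'})
if not ok:
    print('command failed', file=sys.stderr)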
def check_recordings():
    """Scan VCR recordings for access tokens"""
    result = []
    for name, _, test_folder in get_command_modules_paths_with_tests():
        print(test_folder)
        for recording_file in glob.glob(os.path.join(test_folder, 'recordings', '*.yaml')):
            print('Scanning: {}'.format(recording_file))
            with open(recording_file, 'r') as f:
                for line in f:
                    line = line.lower()
                    if 'grant_type=refresh_token' in line or '/oauth2/token' in line \
                            or 'authorization:' in line:
                        result.append(recording_file)
                        break

    if result:
        print('Following VCR recording files contain tokens:')
        for f in result:
            print(' {}'.format(f))
        sys.exit(1)
    sys.exit(0)
Scan VCR recordings for access tokens
check_recordings
python
Azure/azure-cli
tools/automation/tests/check_vcr_recordings.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/tests/check_vcr_recordings.py
MIT
def validate_usage(args):
    """ Ensure conflicting options aren't specified. """
    test_usage = '[--test TESTS [TESTS ...]] [--src-file FILENAME]'
    ci_usage = '--ci'

    usages = []
    if args.tests or args.src_file:
        usages.append(test_usage)
    if args.ci:
        usages.append(ci_usage)

    if len(usages) > 1:
        display('usage error: ' + ' | '.join(usages))
        sys.exit(1)
Ensure conflicting options aren't specified.
validate_usage
python
Azure/azure-cli
tools/automation/tests/__init__.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/tests/__init__.py
MIT
def discover_tests(args):
    """ Builds an index of tests so that the user can simply supply the name they wish to test
    instead of the full path. """
    from importlib import import_module
    import pkgutil

    CORE_EXCLUSIONS = ['command_modules', '__main__', 'testsdk']

    profile_split = args.profile.split('-')
    profile_namespace = '_'.join([profile_split[-1]] + profile_split[:-1])

    mods_ns_pkg = import_module('azure.cli.command_modules')
    core_ns_pkg = import_module('azure.cli')
    command_modules = list(pkgutil.iter_modules(mods_ns_pkg.__path__))
    core_modules = list(pkgutil.iter_modules(core_ns_pkg.__path__))
    extensions = get_extension_modules()

    all_modules = command_modules + [x for x in core_modules if x[1] not in CORE_EXCLUSIONS] + extensions

    display("""
==================
  Discover Tests
==================
""")

    module_data = {}
    for mod in all_modules:
        mod_name = mod[1]
        if mod_name == 'core' or mod_name == 'telemetry':
            mod_data = {
                'filepath': os.path.join(mod[0].path, mod_name, 'tests'),
                'base_path': 'azure.cli.{}.tests'.format(mod_name),
                'files': {}
            }
        elif mod_name.startswith('azext_'):
            mod_data = {
                'filepath': os.path.join(mod[0].path, 'tests', profile_namespace),
                'base_path': '{}.tests.{}'.format(mod_name, profile_namespace),
                'files': {}
            }
        else:
            mod_data = {
                'filepath': os.path.join(mod[0].path, mod_name, 'tests', profile_namespace),
                'base_path': 'azure.cli.command_modules.{}.tests.{}'.format(mod_name, profile_namespace),
                'files': {}
            }

        # get the list of test files in each module
        try:
            contents = os.listdir(mod_data['filepath'])
            test_files = {x[:-len('.py')]: {} for x in contents if x.startswith('test_') and x.endswith('.py')}
        except Exception:
            # skip modules that don't have tests
            display("Module '{}' has no tests.".format(mod_name))
            continue

        for file_name in test_files:
            mod_data['files'][file_name] = {}
            test_file_path = mod_data['base_path'] + '.' + file_name
            try:
                module = import_module(test_file_path)
            except ImportError as ex:
                display('Unable to import {}. Reason: {}'.format(test_file_path, ex))
                continue
            module_dict = module.__dict__
            possible_test_classes = {x: y for x, y in module_dict.items() if not x.startswith('_')}
            for class_name, class_def in possible_test_classes.items():
                try:
                    class_dict = class_def.__dict__
                except AttributeError:
                    # skip non-class symbols in files like constants, imported methods, etc.
                    continue
                if class_dict.get('__module__') == test_file_path:
                    tests = [x for x in class_def.__dict__ if x.startswith('test_')]
                    if tests:
                        mod_data['files'][file_name][class_name] = tests
        module_data[mod_name] = mod_data

    test_index = {}
    conflicted_keys = []

    def add_to_index(key, path):
        key = key or mod_name
        if key in test_index:
            if key not in conflicted_keys:
                conflicted_keys.append(key)
            mod1 = extract_module_name(path)
            mod2 = extract_module_name(test_index[key])
            if mod1 != mod2:
                # resolve conflicted keys by prefixing with the module name and a dot (.)
                display("\nCOLLISION: Test '{}' exists in both '{}' and '{}'. "
                        "Resolve using <MOD_NAME>.<NAME>".format(key, mod1, mod2))
                test_index['{}.{}'.format(mod1, key)] = path
                test_index['{}.{}'.format(mod2, key)] = test_index[key]
            else:
                display("\nERROR: Test '{}' exists twice in the '{}' module.\n"
                        "Please rename one or both and re-run --discover.".format(key, mod1))
        else:
            test_index[key] = path

    # build the index
    for mod_name, mod_data in module_data.items():
        mod_path = mod_data['filepath']
        for file_name, file_data in mod_data['files'].items():
            file_path = os.path.join(mod_path, file_name) + '.py'
            for class_name, test_list in file_data.items():
                for test_name in test_list:
                    test_path = '{}:{}.{}'.format(file_path, class_name, test_name)
                    add_to_index(test_name, test_path)
                class_path = '{}:{}'.format(file_path, class_name)
                add_to_index(class_name, class_path)
            add_to_index(file_name, file_path)
        add_to_index(mod_name, mod_path)

    # remove the conflicted keys since they would arbitrarily point to a random implementation
    for key in conflicted_keys:
        del test_index[key]

    return test_index
Builds an index of tests so that the user can simply supply the name they wish to test instead of the full path.
discover_tests
python
Azure/azure-cli
tools/automation/tests/__init__.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/tests/__init__.py
MIT
def get_nose_runner(parallel=True, process_timeout=600, process_restart=True):
    """Create a nose execution method"""

    def _run_nose(test_folders):
        import nose
        import os.path

        arguments = ['nosetests', __file__, '-v', '-c', os.path.join(get_repo_root(), 'nose.cfg')]
        if parallel:
            arguments += ['--processes=-1', '--process-timeout={}'.format(process_timeout)]
            if process_restart:
                arguments += ['--process-restartworker']

        arguments.extend(test_folders)
        result = nose.run(argv=arguments)
        return result

    return _run_nose
Create a nose execution method
get_nose_runner
python
Azure/azure-cli
tools/automation/tests/nose_helper.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/tests/nose_helper.py
MIT
def mean(data):
    """Return the sample arithmetic mean of data."""
    n = len(data)
    if n < 1:
        raise ValueError('len < 1')
    return sum(data) / float(n)
Return the sample arithmetic mean of data.
mean
python
Azure/azure-cli
tools/automation/verify/verify_module_load_times.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/verify/verify_module_load_times.py
MIT
def sq_deviation(data):
    """Return sum of square deviations of sequence data."""
    c = mean(data)
    return sum((x - c) ** 2 for x in data)
Return sum of square deviations of sequence data.
sq_deviation
python
Azure/azure-cli
tools/automation/verify/verify_module_load_times.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/verify/verify_module_load_times.py
MIT
def pstdev(data):
    """Calculates the population standard deviation."""
    n = len(data)
    if n < 2:
        raise ValueError('len < 2')
    ss = sq_deviation(data)
    return (ss / n) ** 0.5
Calculates the population standard deviation.
pstdev
python
Azure/azure-cli
tools/automation/verify/verify_module_load_times.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/verify/verify_module_load_times.py
MIT
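A quick sketch tying the three statistics helpers together (the sample data is made up):

load_times = [1.2, 1.4, 1.1, 1.5, 1.3]
avg = mean(load_times)          # 1.3
ss = sq_deviation(load_times)   # 0.1
sigma = pstdev(load_times)      # (0.1 / 5) ** 0.5, about 0.141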
def get_repo_root():
    """Returns the path to the source code root directory"""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    while not os.path.exists(os.path.join(current_dir, 'CONTRIBUTING.rst')):
        current_dir = os.path.dirname(current_dir)
    return current_dir
Returns the path to the source code root directory
get_repo_root
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
def get_all_module_paths():
    """List all core and command modules"""
    return list(get_core_modules_paths()) + list(get_command_modules_paths(include_prefix=True))
List all core and command modules
get_all_module_paths
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
def get_config_dir():
    """ Returns the user's Azure directory. """
    return os.getenv('AZURE_CONFIG_DIR', None) or os.path.expanduser(os.path.join('~', '.azure'))
Returns the user's Azure directory.
get_config_dir
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
def get_extension_dir():
    """ Returns the extensions directory. """
    custom_dir = os.environ.get('AZURE_EXTENSION_DIR')
    return os.path.expanduser(custom_dir) if custom_dir else os.path.join(get_config_dir(), 'cliextensions')
Returns the extensions directory.
get_extension_dir
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
def make_dirs(path):
    """Create a directory recursively"""
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:  # Python <= 2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
Create a directory recursively
make_dirs
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
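The errno dance above exists for Python 2 compatibility; on Python 3.2+ the same behavior is available directly (a drop-in sketch, not part of the repo):

import os

def make_dirs(path):
    """Create a directory recursively; no error if it already exists."""
    os.makedirs(path, exist_ok=True)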
def get_test_results_dir(with_timestamp=None, prefix=None):
    """Returns the folder where test results should be saved to. If the folder doesn't exist,
    it will be created."""
    result = os.path.join(get_repo_root(), 'test_results')

    if isinstance(with_timestamp, bool):
        from datetime import datetime
        with_timestamp = datetime.now()

    if with_timestamp:
        if prefix:
            result = os.path.join(result, with_timestamp.strftime(prefix + '_%Y%m%d_%H%M%S'))
        else:
            result = os.path.join(result, with_timestamp.strftime('%Y%m%d_%H%M%S'))

    if not os.path.exists(result):
        make_dirs(result)

    if not os.path.exists(result) or not os.path.isdir(result):
        raise Exception('Failed to create test result dir {}'.format(result))

    return result
Returns the folder where test results should be saved to. If the folder doesn't exist, it will be created.
get_test_results_dir
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
def filter_blacklisted_modules(*black_list_modules):
    """Returns the paths to the modules except those in the black list."""
    import itertools
    existing_modules = list(itertools.chain(get_core_modules_paths(),
                                            get_command_modules_paths()))
    black_list_modules = set(black_list_modules)
    return list((name, path) for name, path in existing_modules if name not in black_list_modules)
Returns the paths to the modules except those in the black list.
filter_blacklisted_modules
python
Azure/azure-cli
tools/automation/utilities/path.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/path.py
MIT
def display(txt):
    """ Output to stderr """
    print(txt, file=sys.stderr)
Output to stderr
display
python
Azure/azure-cli
tools/automation/utilities/display.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/display.py
MIT
def output(txt):
    """ Output to stdout """
    print(txt, file=sys.stdout)
Output to stdout
output
python
Azure/azure-cli
tools/automation/utilities/display.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/display.py
MIT
def get_print_format(records):
    """Find the best format to display the given list of records in table format"""
    if not records:
        raise ValueError('missing parameter records')

    if not isinstance(records, list):
        raise ValueError('records is not a list')

    size = len(records[0])
    max_len = [0] * size

    col_index = list(range(size))
    for rec in records:
        if len(rec) != size:
            raise ValueError('size of elements in the records set are not equal')

        for i in col_index:
            max_len[i] = max(max_len[i], len(str(rec[i])))

    recommend_format = ''
    for each in max_len:
        recommend_format += '{:' + str(each + 2) + '}'

    return recommend_format, max_len
Find the best format to display the given list of records in table format
get_print_format
python
Azure/azure-cli
tools/automation/utilities/display.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/display.py
MIT
def print_records(records, print_format=None, title=None, foot_notes=None):
    """Print a list of tuples with a print format."""
    print_format = print_format or get_print_format(records)[0]
    if print_format is None:
        raise ValueError('print format is required')

    print()
    # parenthesize the conditional so the 'Summary' prefix is kept when no title is given
    print("Summary" + (': {}'.format(title) if title is not None else ''))
    print("==========================")
    for rec in records:
        print(print_format.format(*rec))
    print("==========================")
    if foot_notes:
        for each in foot_notes:
            print('* ' + each)
Print a list of tuples with a print format.
print_records
python
Azure/azure-cli
tools/automation/utilities/display.py
https://github.com/Azure/azure-cli/blob/master/tools/automation/utilities/display.py
MIT
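A sketch showing how get_print_format and print_records work together (the records are made up):

records = [('core', '0.32s'), ('storage', '1.05s')]
fmt, widths = get_print_format(records)   # fmt == '{:9}{:7}', widths == [7, 5]
print_records(records, title='load times')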
def _query_account_key(cli_ctx, account_name):
    """Query the storage account key. This is used when the customer provides only the account
    name, not the account key."""
    rg, scf = _query_account_rg(cli_ctx, account_name)
    t_storage_account_keys = get_sdk(
        cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')

    logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
    if t_storage_account_keys:
        return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).key1
    # of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
    return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).keys[0].value  # pylint: disable=no-member
Query the storage account key. This is used when the customer provides only the account name, not the account key.
_query_account_key
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def _query_account_rg(cli_ctx, account_name):
    """Query the storage account's resource group, which the mgmt sdk requires."""
    scf = storage_client_factory(cli_ctx)
    acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)
    if acc:
        from azure.mgmt.core.tools import parse_resource_id
        return parse_resource_id(acc.id)['resource_group'], scf
    raise ValueError("Storage account '{}' not found.".format(account_name))
Query the storage account's resource group, which the mgmt sdk requires.
_query_account_rg
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def parse_storage_account(cmd, namespace):
    """Parse storage account which can be either account name or account id"""
    from azure.mgmt.core.tools import parse_resource_id, is_valid_resource_id

    if namespace.account_name and is_valid_resource_id(namespace.account_name):
        namespace.resource_group_name = parse_resource_id(namespace.account_name)['resource_group']
        namespace.account_name = parse_resource_id(namespace.account_name)['name']
    elif namespace.account_name and not is_valid_resource_id(namespace.account_name) and \
            not namespace.resource_group_name:
        namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
Parse storage account which can be either account name or account id
parse_storage_account
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def process_resource_group(cmd, namespace):
    """Processes the resource group parameter from the account name"""
    if namespace.account_name and not namespace.resource_group_name:
        namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
Processes the resource group parameter from the account name
process_resource_group
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def validate_client_parameters(cmd, namespace):
    """ Retrieves storage connection parameters from environment variables and parses the
    connection string into account name and key """
    n = namespace

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value(cmd, 'storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value(cmd, 'storage', 'account', None)
        if auth_mode == 'login':
            prefix = cmd.command_kwargs['resource_type'].value[0]
            # is_storagev2() is used to distinguish if the command is in track2 SDK
            # If yes, we will use get_login_credentials() as token credential
            if is_storagev2(prefix):
                from azure.cli.core._profile import Profile
                profile = Profile(cli_ctx=cmd.cli_ctx)
                n.token_credential, _, _ = profile.get_login_credentials(subscription_id=n._subscription)
            # Otherwise, we will assume it is in track1 and keep previous token updater
            else:
                n.token_credential = _create_token_credential(cmd.cli_ctx)

    if hasattr(n, 'token_credential') and n.token_credential:
        # give warning if there are account key args being ignored
        account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
                            n.connection_string and "--connection-string"]
        account_key_args = [arg for arg in account_key_args if arg]

        if account_key_args:
            logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                           ', '.join(account_key_args))
        return

    if not n.connection_string:
        n.connection_string = get_config_value(cmd, 'storage', 'connection_string', None)

    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        n.sas_token = conn_dict.get('SharedAccessSignature')

    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        n.account_name = get_config_value(cmd, 'storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value(cmd, 'storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value(cmd, 'storage', 'sas_token', None)

    # strip the '?' from sas token. The portal and the command line return the sas token in
    # different forms.
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        logger.warning('There are no credentials provided in your command and environment, we will query for the '
                       'account key inside your storage account. \nPlease provide --connection-string, '
                       '--account-key or --sas-token as credentials, or use `--auth-mode login` if you '
                       'have required RBAC roles in your command. For more information about RBAC roles '
                       'in storage, visit '
                       'https://learn.microsoft.com/azure/storage/common/storage-auth-aad-rbac-cli. \n'
                       'Setting the corresponding environment variables can avoid inputting credentials in '
                       'your command. Please use --help to get more information.')
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
Retrieves storage connection parameters from environment variables and parses the connection string into account name and key
validate_client_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def process_blob_source_uri(cmd, namespace):
    """
    Validate the parameters referencing a blob source and create the source URI from them.
    """
    from .util import create_short_lived_blob_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t   --source-uri' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '

    ns = vars(namespace)

    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)

    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    sas = ns.pop('source_sas', None)

    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        # simplest scenario--no further processing necessary
        return

    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account

    # determine if the copy will happen in the same storage account
    if not source_account_name and source_account_key:
        raise ValueError(usage_string.format('Source account key is given but account name is not'))
    if not source_account_name and not source_account_key:
        # neither source account name nor key is given, assume that user intends to copy blob in
        # the same account
        source_account_name = ns.get('account_name', None)
        source_account_key = ns.get('account_key', None)
    elif source_account_name and not source_account_key:
        if source_account_name == ns.get('account_name', None):
            # the source account name is same as the destination account name
            source_account_key = ns.get('account_key', None)
        else:
            # the source account is different from destination account but the key is missing
            # try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # else: both source account name and key are given by user

    if not source_account_name:
        raise ValueError(usage_string.format('Storage account name not found'))

    if not sas:
        sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)

    query_params = []
    if sas:
        query_params.append(sas)
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))

    uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
                                                cmd.cli_ctx.cloud.suffixes.storage_endpoint,
                                                container,
                                                blob,
                                                '?' if query_params else '',
                                                '&'.join(query_params))

    namespace.copy_source = uri
Validate the parameters referencing a blob source and create the source URI from them.
process_blob_source_uri
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def validate_encryption_services(cmd, namespace):
    """
    Builds up the encryption services object for storage account operations based on the
    list of services passed in.
    """
    if namespace.encryption_services:
        t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
                                                              'EncryptionServices', 'EncryptionService',
                                                              mod='models')
        services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}

        namespace.encryption_services = t_encryption_services(**services)
Builds up the encryption services object for storage account operations based on the list of services passed in.
validate_encryption_services
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def cast_val(key, val):
    """ Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
    can be queried correctly. """
    if key in ['PartitionKey', 'RowKey']:
        return val

    def try_cast(to_type):
        try:
            return to_type(val)
        except ValueError:
            return None

    return try_cast(int) or try_cast(float) or val
Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they can be queried correctly.
validate_entity.cast_val
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
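A usage sketch for cast_val (keys and values are illustrative):

cast_val('Age', '31')         # -> 31 (int)
cast_val('Score', '9.5')      # -> 9.5 (float)
cast_val('Name', 'bob')       # -> 'bob' (unchanged)
cast_val('RowKey', '001')     # -> '001' (keys are never cast)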
def validate_entity(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    RowKey and PartitionKey are converted to the correct case and included. """
    values = dict(x.split('=', 1) for x in namespace.entity)
    keys = values.keys()
    for key in list(keys):
        if key.lower() == 'rowkey':
            val = values[key]
            del values[key]
            values['RowKey'] = val
        elif key.lower() == 'partitionkey':
            val = values[key]
            del values[key]
            values['PartitionKey'] = val
    keys = values.keys()
    missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
    missing_keys = '{}PartitionKey'.format(missing_keys) \
        if 'PartitionKey' not in keys else missing_keys
    if missing_keys:
        raise argparse.ArgumentError(
            None, 'incorrect usage: entity requires: {}'.format(missing_keys))

    def cast_val(key, val):
        """ Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
        can be queried correctly. """
        if key in ['PartitionKey', 'RowKey']:
            return val

        def try_cast(to_type):
            try:
                return to_type(val)
            except ValueError:
                return None

        return try_cast(int) or try_cast(float) or val

    # ensure numbers are converted from strings so querying will work correctly
    values = {key: cast_val(key, val) for key, val in values.items()}
    namespace.entity = values
Converts a list of key value pairs into a dictionary. Ensures that required RowKey and PartitionKey are converted to the correct case and included.
validate_entity
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def validate_marker(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    nextrowkey and nextpartitionkey are included. """
    if not namespace.marker:
        return
    marker = dict(x.split('=', 1) for x in namespace.marker)
    expected_keys = {'nextrowkey', 'nextpartitionkey'}

    for key in list(marker.keys()):
        new_key = key.lower()
        if new_key in expected_keys:
            expected_keys.remove(key.lower())
            val = marker[key]
            del marker[key]
            marker[new_key] = val
    if expected_keys:
        raise argparse.ArgumentError(
            None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))

    namespace.marker = marker
Converts a list of key value pairs into a dictionary. Ensures that required nextrowkey and nextpartitionkey are included.
validate_marker
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def get_file_path_validator(default_file_param=None):
    """ Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
    Allows another path-type parameter to be named which can supply a default filename. """
    def validator(namespace):
        import os
        if not hasattr(namespace, 'path'):
            return
        path = namespace.path
        dir_name, file_name = os.path.split(path) if path else (None, '')

        if default_file_param and '.' not in file_name:
            dir_name = path
            file_name = os.path.split(getattr(namespace, default_file_param))[1]

        dir_name = None if dir_name in ('', '.') else dir_name
        namespace.directory_name = dir_name
        namespace.file_name = file_name
        del namespace.path
    return validator
Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'. Allows another path-type parameter to be named which can supply a default filename.
get_file_path_validator
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
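A sketch of the generated validator in action (the namespace is simulated with argparse.Namespace):

import argparse

ns = argparse.Namespace(path='docs/readme.md')
get_file_path_validator()(ns)
print(ns.directory_name, ns.file_name)  # -> docs readme.md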
def table_permission_validator(cmd, namespace):
    """ A special case for table because the SDK associates the QUERY permission with 'r' """
    t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
    if namespace.permission:
        if set(namespace.permission) - set('raud'):
            help_string = '(r)ead/query (a)dd (u)pdate (d)elete'
            raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
        namespace.permission = t_table_permissions(_str=namespace.permission)
A special case for table because the SDK associates the QUERY permission with 'r'
table_permission_validator
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def get_source_file_or_blob_service_client(cmd, namespace):
    """
    Create the second file service or blob service client for batch copy command, which is used
    to list the source files or blobs. If both the source account and source URI are omitted, it
    indicates that the user wants to copy files or blobs in the same storage account, therefore
    the source client will be set to None and the command will use the destination client.
    """
    t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
    usage_string = 'invalid usage: supply only one of the following argument sets:' + \
                   '\n\t   --source-uri  [--source-sas]' + \
                   '\n\tOR --source-container' + \
                   '\n\tOR --source-container --source-account-name --source-account-key' + \
                   '\n\tOR --source-container --source-account-name --source-sas' + \
                   '\n\tOR --source-share --source-account-name --source-account-key' + \
                   '\n\tOR --source-share --source-account-name --source-account-sas'

    ns = vars(namespace)
    source_account = ns.pop('source_account_name', None)
    source_key = ns.pop('source_account_key', None)
    source_uri = ns.pop('source_uri', None)
    source_sas = ns.get('source_sas', None)
    source_container = ns.get('source_container', None)
    source_share = ns.get('source_share', None)

    if source_uri and source_account:
        raise ValueError(usage_string)
    if not source_uri and bool(source_container) == bool(source_share):  # must be container or share
        raise ValueError(usage_string)

    if (not source_account) and (not source_uri):
        # Set the source_client to None if neither source_account nor source_uri is given. This
        # indicates the command that the source files share or blob container is in the same
        # storage account as the destination file share or blob container.
        #
        # The command itself should create the source service client since the validator can't
        # access the destination client through the namespace.
        #
        # A few arguments check will be made as well so as not to cause ambiguity.
        if source_key or source_sas:
            raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
                             ' same as the destination account.\nDo not provide --source-sas or --source-account-key')
        ns['source_client'] = None

        if 'token_credential' not in ns:  # not using oauth
            return
        # oauth is only possible through destination, must still get source creds
        source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']

    if source_account:
        if not (source_key or source_sas):
            # when neither storage account key nor SAS is given, try to fetch the key in the
            # current subscription
            source_key = _query_account_key(cmd.cli_ctx, source_account)

        if source_container:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
        elif source_share:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
    elif source_uri:
        if source_key or source_container or source_share:
            raise ValueError(usage_string)

        from .storage_url_helpers import StorageResourceIdentifier
        if source_sas:
            source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
        identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
        nor_container_or_share = not identifier.container and not identifier.share
        if not identifier.is_url():
            raise ValueError('incorrect usage: --source-uri expects a URI')
        if identifier.blob or identifier.directory or identifier.filename or nor_container_or_share:
            raise ValueError('incorrect usage: --source-uri has to be blob container or file share')

        if identifier.sas_token:
            ns['source_sas'] = identifier.sas_token
        else:
            source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)

        if identifier.container:
            ns['source_container'] = identifier.container
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
        elif identifier.share:
            ns['source_share'] = identifier.share
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
Create the second file service or blob service client for batch copy command, which is used to list the source files or blobs. If both the source account and source URI are omitted, the user wants to copy files or blobs within the same storage account; the source client is then set to None so that the command falls back to the destination client.
get_source_file_or_blob_service_client
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
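The trickiest part of the validator is the mutual-exclusion rule between --source-uri, --source-container, and --source-share. A reduced sketch of just that rule, with hypothetical plain-Python arguments standing in for the namespace:

def check_source_args(source_uri=None, source_container=None, source_share=None, source_account=None):
    if source_uri and source_account:
        raise ValueError('supply either --source-uri or --source-account-name, not both')
    if not source_uri and bool(source_container) == bool(source_share):
        # without a URI, exactly one of container/share must be given
        raise ValueError('supply exactly one of --source-container or --source-share')

check_source_args(source_container='src')                     # ok
# check_source_args(source_container='a', source_share='b')   # raises: ambiguous
# check_source_args()                                         # raises: nothing given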
def process_container_delete_parameters(cmd, namespace):
    """Process the parameters for storage container delete command"""
    # check whether to use mgmt or data-plane
    if namespace.bypass_immutability_policy:
        # use management-plane
        namespace.processed_account_name = namespace.account_name
        namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
            cmd.cli_ctx, namespace.account_name)
        del namespace.auth_mode
    else:
        # use data-plane, like before
        validate_client_parameters(cmd, namespace)
Process the parameters for storage container delete command
process_container_delete_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
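The branch amounts to a plane selection: bypassing the immutability policy requires an ARM (management-plane) call, while everything else stays on the data plane. As a one-line sketch (pick_plane is a hypothetical name):

def pick_plane(bypass_immutability_policy):
    # mirrors the if/else above: management plane only when the policy must be bypassed
    return 'management' if bypass_immutability_policy else 'data'

assert pick_plane(True) == 'management'
assert pick_plane(False) == 'data'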
def process_blob_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage blob download command"""
    import os

    # 1. quick check
    if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
        raise ValueError('incorrect usage: destination must be an existing directory')

    # 2. try to extract account name and container name from source string
    _process_blob_batch_container_parameters(cmd, namespace)

    # 3. Call validators
    add_progress_callback(cmd, namespace)
Process the parameters for storage blob download command
process_blob_download_batch_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def process_blob_upload_batch_parameters(cmd, namespace):
    """Process the source and destination of storage blob upload command"""
    import os

    # 1. quick check
    if not os.path.exists(namespace.source) or not os.path.isdir(namespace.source):
        raise ValueError('incorrect usage: source must be an existing directory')

    # 2. try to extract account name and container name from destination string
    _process_blob_batch_container_parameters(cmd, namespace, source=False)

    # 3. collect the files to be uploaded
    namespace.source = os.path.realpath(namespace.source)
    namespace.source_files = list(glob_files_locally(namespace.source, namespace.pattern))

    # 4. determine blob type
    if namespace.blob_type is None:
        vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
        if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
            # when all the listed files are vhd files use page
            namespace.blob_type = 'page'
        elif any(vhd_files):
            # source files contain vhd files but not all of them
            raise CLIError("""Fail to guess the required blob type. Type of the files to be uploaded
are not consistent. Default blob type for .vhd files is "page", while others are "block". You can
solve this problem by either explicitly set the blob type or ensure the pattern matches a correct
set of files.""")
        else:
            namespace.blob_type = 'block'

    # 5. call other validators
    validate_metadata(namespace)
    t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
    get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
    add_progress_callback(cmd, namespace)
Process the source and destination of storage blob upload command
process_blob_upload_batch_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
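Step 4 is the interesting one: the blob type is inferred from the file set. A standalone sketch of the heuristic (guess_blob_type is a hypothetical name, and it takes plain path strings, whereas the real code inspects the first element of the (local_path, blob_name) tuples produced by glob_files_locally):

def guess_blob_type(paths):
    vhd = [p for p in paths if p.endswith('.vhd')]
    if vhd and len(vhd) == len(paths):
        return 'page'       # all .vhd -> page blobs
    if vhd:
        raise ValueError('mixed .vhd and non-.vhd files; set the blob type explicitly')
    return 'block'          # no .vhd -> block blobs

print(guess_blob_type(['disk1.vhd', 'disk2.vhd']))   # page
print(guess_blob_type(['report.csv']))               # block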
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
    """Process the container parameters for storage blob batch commands before populating args from
    environment."""
    if source:
        container_arg, container_name_arg = 'source', 'source_container_name'
    else:
        # destination
        container_arg, container_name_arg = 'destination', 'destination_container_name'

    # try to extract account name and container name from source string
    from .storage_url_helpers import StorageResourceIdentifier
    container_arg_val = getattr(namespace, container_arg)  # either a url or name
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)

    if not identifier.is_url():
        setattr(namespace, container_name_arg, container_arg_val)
    elif identifier.blob:
        raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
    else:
        setattr(namespace, container_name_arg, identifier.container)
        if namespace.account_name is None:
            namespace.account_name = identifier.account_name
        elif namespace.account_name != identifier.account_name:
            raise ValueError('The given storage account name is not consistent with the '
                             'account name in the destination URL')

        # if no sas-token is given and the container url contains one, use it
        if not namespace.sas_token and identifier.sas_token:
            namespace.sas_token = identifier.sas_token

    # Finally, grab missing storage connection parameters from environment variables
    validate_client_parameters(cmd, namespace)
Process the container parameters for storage blob batch commands before populating args from environment.
_process_blob_batch_container_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
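The argument may be a bare container name or a full URL; the real code delegates the parsing to StorageResourceIdentifier, which also extracts the account name and any SAS token. A rough sketch of the same branching using only the standard library (split_container_arg and the URL-layout assumption are illustrative, not the actual parser):

from urllib.parse import urlparse

def split_container_arg(value):
    parsed = urlparse(value)
    if not parsed.scheme:                        # a bare name, use as-is
        return None, value
    account = parsed.netloc.split('.')[0]        # assumes <account>.blob.<suffix>
    container = parsed.path.lstrip('/').split('/')[0]
    return account, container

print(split_container_arg('mycontainer'))
print(split_container_arg('https://myacct.blob.core.windows.net/mycontainer'))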
def process_file_upload_batch_parameters(cmd, namespace):
    """Process the parameters of storage file batch upload command"""
    import os

    # 1. quick check
    if not os.path.exists(namespace.source):
        raise ValueError('incorrect usage: source {} does not exist'.format(namespace.source))

    if not os.path.isdir(namespace.source):
        raise ValueError('incorrect usage: source must be a directory')

    # 2. try to extract account name and container name from destination string
    from .storage_url_helpers import StorageResourceIdentifier
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
    if identifier.is_url():
        if identifier.filename or identifier.directory:
            raise ValueError('incorrect usage: destination must be a file share url')

        namespace.destination = identifier.share

        if not namespace.account_name:
            namespace.account_name = identifier.account_name

    namespace.source = os.path.realpath(namespace.source)
Process the parameters of storage file batch upload command
process_file_upload_batch_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def process_file_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage file batch download command"""
    import os

    # 1. quick check
    if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
        raise ValueError('incorrect usage: destination must be an existing directory')

    # 2. try to extract account name and share name from source string
    process_file_batch_source_parameters(cmd, namespace)
Process the parameters for storage file batch download command
process_file_download_batch_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def datetime_type(string):
    """ Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31 """
    # Note: this is the inner closure of get_datetime_type (see the next entry);
    # `to_string` and `datetime` are bound in the enclosing scope.
    accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ', '%Y-%m-%dT%HZ', '%Y-%m-%d']
    for form in accepted_date_formats:
        try:
            if to_string:
                return datetime.strptime(string, form).strftime(form)
            return datetime.strptime(string, form)
        except ValueError:
            continue
    raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
Validates UTC datetime. Examples of accepted forms: 2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31
get_datetime_type.datetime_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def get_datetime_type(to_string):
    """ Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31 """
    from datetime import datetime

    def datetime_type(string):
        """ Validates UTC datetime. Examples of accepted forms:
        2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31 """
        accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ', '%Y-%m-%dT%HZ', '%Y-%m-%d']
        for form in accepted_date_formats:
            try:
                if to_string:
                    return datetime.strptime(string, form).strftime(form)
                return datetime.strptime(string, form)
            except ValueError:
                continue
        raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))

    return datetime_type
Validates UTC datetime. Examples of accepted forms: 2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31
get_datetime_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
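Assuming get_datetime_type from this module is importable, usage looks like the following; in azure-cli it is normally wired into an argument's type= in the params file. The to_string flag decides whether the validator returns a normalized string or a datetime object:

parse_utc = get_datetime_type(to_string=False)
print(parse_utc('2017-12-31T01:11Z'))   # datetime.datetime(2017, 12, 31, 1, 11)

format_utc = get_datetime_type(to_string=True)
print(format_utc('2017-12-31'))         # '2017-12-31' (round-tripped through strptime)

# parse_utc('31/12/2017')               # raises ValueError: not an accepted form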
def ipv4_range_type(string):
    """ Validates an IPv4 address or address range. """
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    if not re.match("^{}$".format(ip_format), string):
        if not re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string):
            raise ValueError
    return string
Validates an IPv4 address or address range.
ipv4_range_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
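A few probes show what the pattern accepts; note that it only constrains digit counts, so out-of-range octets such as 999 slip through (a known looseness of the \d{1,3} idiom):

import re

ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
single = re.compile('^{}$'.format(ip_format))
ranged = re.compile('^{ip}-{ip}$'.format(ip=ip_format))

assert single.match('10.0.0.1')
assert ranged.match('10.0.0.1-10.0.0.255')
assert not single.match('10.0.0')      # too few octets
assert single.match('999.0.0.1')       # passes despite not being a valid IPv4 address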
def resource_type_type(loader):
    """ Returns a function which validates that resource types string contains only a combination
    of service, container, and object. Their shorthand representations are s, c, and o. """
    def impl(string):
        t_resources = loader.get_models('common.models#ResourceTypes')
        if set(string) - set("sco"):
            raise ValueError
        return t_resources(_str=''.join(set(string)))
    return impl
Returns a function which validates that resource types string contains only a combination of service, container, and object. Their shorthand representations are s, c, and o.
resource_type_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
def services_type(loader):
    """ Returns a function which validates that services string contains only a combination of
    blob, queue, table, and file. Their shorthand representations are b, q, t, and f. """
    def impl(string):
        t_services = loader.get_models('common.models#Services')
        if set(string) - set("bqtf"):
            raise ValueError
        return t_services(_str=''.join(set(string)))
    return impl
Returns a function which validates that services string contains only a combination of blob, queue, table, and file. Their shorthand representations are b, q, t, and f.
services_type
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py
MIT
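resource_type_type and services_type share one pattern: a factory closing over a loader, an allow-list membership check via set difference, and deduplication before wrapping in an SDK model. A generic standalone sketch of that pattern (make_flag_validator is hypothetical; the SDK wrapping is omitted):

def make_flag_validator(allowed):
    def impl(string):
        if set(string) - set(allowed):
            raise ValueError('allowed letters: ' + allowed)
        return ''.join(set(string))   # duplicates collapse; order is not preserved
    return impl

services = make_flag_validator('bqtf')
print(sorted(services('bbq')))        # ['b', 'q']
# services('bx')                      # raises ValueError: 'x' is not allowed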
def get_custom_sdk(custom_module, client_factory, resource_type=ResourceType.DATA_STORAGE):
    """Returns a CliCommandType instance with specified operation template based on the given custom
    module name. This is useful when the command is not defined in the default 'custom' module but
    instead in a module under 'operations' package."""
    return CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.{}#'.format(
            custom_module) + '{}',
        client_factory=client_factory,
        resource_type=resource_type
    )
Returns a CliCommandType instance with specified operation template based on the given custom module name. This is useful when the command is not defined in the default 'custom' module but instead in a module under 'operations' package.
load_command_table.get_custom_sdk
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/commands_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/commands_azure_stack.py
MIT
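The template construction is plain string assembly: the literal '{}' appended at the end is left unformatted so the CLI core can substitute the operation name later. Tracing it by hand:

custom_module = 'azcopy'
operations_tmpl = 'azure.cli.command_modules.storage.operations.{}#'.format(custom_module) + '{}'
print(operations_tmpl)
# azure.cli.command_modules.storage.operations.azcopy#{}
print(operations_tmpl.format('storage_copy'))
# azure.cli.command_modules.storage.operations.azcopy#storage_copy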
def load_command_table(self, _):  # pylint: disable=too-many-locals, too-many-statements
    storage_account_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#StorageAccountsOperations.{}',
        client_factory=cf_sa,
        resource_type=ResourceType.MGMT_STORAGE
    )

    blob_service_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#BlobServicesOperations.{}',
        client_factory=cf_mgmt_blob_services,
        resource_type=ResourceType.MGMT_STORAGE
    )

    file_service_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#FileServicesOperations.{}',
        client_factory=cf_mgmt_file_services,
        resource_type=ResourceType.MGMT_STORAGE
    )

    file_shares_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#FileSharesOperations.{}',
        client_factory=cf_mgmt_file_shares,
        resource_type=ResourceType.MGMT_STORAGE
    )

    storage_account_sdk_keys = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#StorageAccountsOperations.{}',
        client_factory=cf_sa_for_keys,
        resource_type=ResourceType.MGMT_STORAGE
    )

    private_link_resource_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#PrivateLinkResourcesOperations.{}',
        client_factory=cf_private_link,
        resource_type=ResourceType.MGMT_STORAGE
    )

    private_endpoint_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#PrivateEndpointConnectionsOperations.{}',
        client_factory=cf_private_endpoint,
        resource_type=ResourceType.MGMT_STORAGE
    )

    private_endpoint_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_private_endpoint,
        resource_type=ResourceType.MGMT_STORAGE)

    storage_account_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_sa)

    cloud_data_plane_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storage.common#CloudStorageAccount.{}',
        client_factory=cloud_storage_account_service_factory
    )

    block_blob_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storage.blob.blockblobservice#BlockBlobService.{}',
        client_factory=blob_data_service_factory,
        resource_type=ResourceType.DATA_STORAGE)

    def get_custom_sdk(custom_module, client_factory, resource_type=ResourceType.DATA_STORAGE):
        """Returns a CliCommandType instance with specified operation template based on the given
        custom module name. This is useful when the command is not defined in the default 'custom'
        module but instead in a module under 'operations' package."""
        return CliCommandType(
            operations_tmpl='azure.cli.command_modules.storage.operations.{}#'.format(
                custom_module) + '{}',
            client_factory=client_factory,
            resource_type=resource_type
        )

    with self.command_group('storage', command_type=block_blob_sdk,
                            custom_command_type=get_custom_sdk('azcopy', blob_data_service_factory)) as g:
        g.storage_custom_command('remove', 'storage_remove', is_preview=True)

    with self.command_group('storage', custom_command_type=get_custom_sdk('azcopy', None)) as g:
        g.custom_command('copy', 'storage_copy', is_preview=True)

    with self.command_group('storage account', storage_account_sdk, resource_type=ResourceType.MGMT_STORAGE,
                            custom_command_type=storage_account_custom_type) as g:
        g.custom_command('check-name', 'check_name_availability')
        g.custom_command('create', 'create_storage_account')
        g.command('delete', 'delete', confirmation=True)
        g.show_command('show', 'get_properties')
        g.custom_command('list', 'list_storage_accounts')
        g.custom_command('show-usage', 'show_storage_account_usage', min_api='2018-02-01')
        g.custom_command('show-usage', 'show_storage_account_usage_no_location', max_api='2017-10-01')
        g.custom_command('show-connection-string', 'show_storage_account_connection_string')
        g.generic_update_command('update', getter_name='get_properties', setter_name='update',
                                 custom_func_name='update_storage_account', min_api='2016-12-01')
        failover_confirmation = """
        The secondary cluster will become the primary cluster after failover. Please understand the following impact to your storage account before you initiate the failover:
            1. Please check the Last Sync Time using `az storage account show` with `--expand geoReplicationStats` and check the "geoReplicationStats" property. This is the data you may lose if you initiate the failover.
            2. After the failover, your storage account type will be converted to locally redundant storage (LRS). You can convert your account to use geo-redundant storage (GRS).
            3. Once you re-enable GRS/GZRS for your storage account, Microsoft will replicate data to your new secondary region. Replication time is dependent on the amount of data to replicate. Please note that there are bandwidth charges for the bootstrap. Please refer to doc: https://azure.microsoft.com/pricing/details/bandwidth/
        """
        g.command('failover', 'begin_failover', supports_no_wait=True, is_preview=True, min_api='2018-07-01',
                  confirmation=failover_confirmation)

    with self.command_group('storage account', storage_account_sdk_keys,
                            resource_type=ResourceType.MGMT_STORAGE,
                            custom_command_type=storage_account_custom_type) as g:
        from ._validators_azure_stack import validate_key_name
        g.custom_command('keys renew', 'regenerate_key', validator=validate_key_name,
                         transform=lambda x: getattr(x, 'keys', x))
        g.command('keys list', 'list_keys', transform=lambda x: getattr(x, 'keys', x))
        g.command('revoke-delegation-keys', 'revoke_user_delegation_keys', min_api='2019-04-01')

    with self.command_group('storage account', cloud_data_plane_sdk) as g:
        g.storage_command('generate-sas', 'generate_shared_access_signature')

    encryption_scope_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#EncryptionScopesOperations.{}',
        client_factory=cf_mgmt_encryption_scope,
        resource_type=ResourceType.MGMT_STORAGE
    )

    encryption_scope_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_mgmt_encryption_scope,
        resource_type=ResourceType.MGMT_STORAGE
    )

    with self.command_group('storage account encryption-scope', encryption_scope_sdk,
                            custom_command_type=encryption_scope_custom_type, is_preview=True,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01') as g:
        g.custom_command('create', 'create_encryption_scope')
        g.show_command('show', 'get')
        g.command('list', 'list')
        g.custom_command('update', 'update_encryption_scope')

    management_policy_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#ManagementPoliciesOperations.{}',
        client_factory=cf_mgmt_policy,
        resource_type=ResourceType.MGMT_STORAGE
    )

    management_policy_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_mgmt_policy)

    storage_blob_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.blob_azure_stack#{}',
        client_factory=cf_sa,
        resource_type=ResourceType.MGMT_STORAGE)

    with self.command_group('storage account management-policy', management_policy_sdk,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2018-11-01',
                            custom_command_type=management_policy_custom_type) as g:
        g.custom_show_command('show', 'get_management_policy')
        g.custom_command('create', 'create_management_policies')
        g.generic_update_command('update', getter_name='get_management_policy',
                                 getter_type=management_policy_custom_type,
                                 setter_name='update_management_policies',
                                 setter_type=management_policy_custom_type)
        g.custom_command('delete', 'delete_management_policy')

    with self.command_group('storage account network-rule', storage_account_sdk,
                            custom_command_type=storage_account_custom_type,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01') as g:
        g.custom_command('add', 'add_network_rule')
        g.custom_command('list', 'list_network_rules')
        g.custom_command('remove', 'remove_network_rule')

    with self.command_group('storage account private-endpoint-connection', private_endpoint_sdk,
                            custom_command_type=private_endpoint_custom_type, is_preview=True,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01') as g:
        from ._validators import validate_private_endpoint_connection_id
        g.command('delete', 'delete', confirmation=True, validator=validate_private_endpoint_connection_id)
        g.show_command('show', 'get', validator=validate_private_endpoint_connection_id)
        g.custom_command('approve', 'approve_private_endpoint_connection',
                         validator=validate_private_endpoint_connection_id)
        g.custom_command('reject', 'reject_private_endpoint_connection',
                         validator=validate_private_endpoint_connection_id)

    with self.command_group('storage account private-link-resource', private_link_resource_sdk,
                            resource_type=ResourceType.MGMT_STORAGE) as g:
        from azure.cli.core.commands.transform import gen_dict_to_list_transform
        g.command('list', 'list_by_storage_account', is_preview=True, min_api='2019-06-01',
                  transform=gen_dict_to_list_transform(key="value"))

    with self.command_group('storage account blob-service-properties', blob_service_mgmt_sdk,
                            custom_command_type=storage_account_custom_type,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2018-07-01',
                            is_preview=True) as g:
        g.show_command('show', 'get_service_properties')
        g.generic_update_command('update', getter_name='get_service_properties',
                                 setter_name='set_service_properties',
                                 custom_func_name='update_blob_service_properties')

    with self.command_group('storage account file-service-properties', file_service_mgmt_sdk,
                            custom_command_type=get_custom_sdk('account',
                                                               client_factory=cf_mgmt_file_services,
                                                               resource_type=ResourceType.MGMT_STORAGE),
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01',
                            is_preview=True) as g:
        g.show_command('show', 'get_service_properties')
        g.custom_command('update', 'update_file_service_properties')

    with self.command_group('storage logging', get_custom_sdk('logging', multi_service_properties_factory)) as g:
        from ._transformers_azure_stack import transform_logging_list_output
        g.storage_command('update', 'set_logging')
        g.storage_command('show', 'get_logging', table_transformer=transform_logging_list_output,
                          exception_handler=show_exception_handler)
        g.storage_command('off', 'disable_logging', is_preview=True)

    with self.command_group('storage metrics', get_custom_sdk('metrics', multi_service_properties_factory)) as g:
        from ._transformers_azure_stack import transform_metrics_list_output
        g.storage_command('update', 'set_metrics')
        g.storage_command('show', 'get_metrics', table_transformer=transform_metrics_list_output,
                          exception_handler=show_exception_handler)

    base_blob_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storage.blob.baseblobservice#BaseBlobService.{}',
        client_factory=blob_data_service_factory,
        resource_type=ResourceType.DATA_STORAGE)

    with self.command_group('storage blob', command_type=block_blob_sdk,
                            custom_command_type=get_custom_sdk('blob_azure_stack',
                                                               blob_data_service_factory)) as g:
        from ._format import transform_boolean_for_table, transform_blob_output
        from ._transformers_azure_stack import (transform_storage_list_output, transform_url,
                                                create_boolean_result_output_transformer)
        from ._validators_azure_stack import (process_blob_download_batch_parameters,
                                              process_blob_delete_batch_parameters,
                                              process_blob_upload_batch_parameters)

        g.storage_command_oauth('list', 'list_blobs', transform=transform_storage_list_output,
                                table_transformer=transform_blob_output)
        g.storage_command_oauth('download', 'get_blob_to_path', table_transformer=transform_blob_output)
        g.storage_custom_command_oauth('generate-sas', 'generate_sas_blob_uri')
        g.storage_custom_command_oauth('url', 'create_blob_url', transform=transform_url)
        g.storage_command_oauth('snapshot', 'snapshot_blob')
        g.storage_command_oauth('update', 'set_blob_properties')
        g.storage_command_oauth('exists', 'exists',
                                transform=create_boolean_result_output_transformer('exists'))
        g.storage_command_oauth('delete', 'delete_blob')
        g.storage_command_oauth('undelete', 'undelete_blob',
                                transform=create_boolean_result_output_transformer('undeleted'),
                                table_transformer=transform_boolean_for_table, min_api='2017-07-29')
        g.storage_custom_command_oauth('set-tier', 'set_blob_tier')
        g.storage_custom_command_oauth('upload', 'upload_blob',
                                       doc_string_source='blob#BlockBlobService.create_blob_from_path')
        g.storage_custom_command_oauth('upload-batch', 'storage_blob_upload_batch',
                                       validator=process_blob_upload_batch_parameters)
        g.storage_custom_command_oauth('download-batch', 'storage_blob_download_batch',
                                       validator=process_blob_download_batch_parameters)
        g.storage_custom_command_oauth('delete-batch', 'storage_blob_delete_batch',
                                       validator=process_blob_delete_batch_parameters)
        g.storage_custom_command_oauth('show', 'show_blob', table_transformer=transform_blob_output,
                                       client_factory=page_blob_service_factory,
                                       doc_string_source='blob#PageBlobService.get_blob_properties',
                                       exception_handler=show_exception_handler)
        g.storage_command_oauth('metadata show', 'get_blob_metadata',
                                exception_handler=show_exception_handler)
        g.storage_command_oauth('metadata update', 'set_blob_metadata')
        g.storage_command_oauth('lease acquire', 'acquire_blob_lease')
        g.storage_command_oauth('lease renew', 'renew_blob_lease')
        g.storage_command_oauth('lease release', 'release_blob_lease')
        g.storage_command_oauth('lease change', 'change_blob_lease')
        g.storage_command_oauth('lease break', 'break_blob_lease')
        g.storage_command_oauth('copy start', 'copy_blob')
        g.storage_command_oauth('copy cancel', 'abort_copy_blob')
        g.storage_custom_command_oauth('copy start-batch', 'storage_blob_copy_batch')

    with self.command_group('storage blob', storage_account_sdk, resource_type=ResourceType.MGMT_STORAGE,
                            custom_command_type=storage_blob_custom_type) as g:
        g.custom_command('restore', 'restore_blob_ranges', min_api='2019-06-01', is_preview=True,
                         supports_no_wait=True)

    with self.command_group('storage blob incremental-copy',
                            operations_tmpl='azure.multiapi.storage.blob.pageblobservice#PageBlobService.{}',
                            client_factory=page_blob_service_factory,
                            resource_type=ResourceType.DATA_STORAGE, min_api='2016-05-31') as g:
        g.storage_command_oauth('start', 'incremental_copy_blob')

    with self.command_group('storage blob incremental-copy',
                            operations_tmpl='azure.multiapi.storage.blob.blockblobservice#BlockBlobService.{}',
                            client_factory=page_blob_service_factory,
                            resource_type=ResourceType.DATA_STORAGE, min_api='2016-05-31') as g:
        g.storage_command_oauth('cancel', 'abort_copy_blob')

    with self.command_group('storage blob service-properties delete-policy', command_type=base_blob_sdk,
                            min_api='2017-07-29',
                            custom_command_type=get_custom_sdk('blob_azure_stack',
                                                               blob_data_service_factory)) as g:
        g.storage_command_oauth('show', 'get_blob_service_properties',
                                transform=lambda x: getattr(x, 'delete_retention_policy', x),
                                exception_handler=show_exception_handler)
        g.storage_custom_command_oauth('update', 'set_delete_policy')

    with self.command_group('storage blob service-properties', command_type=base_blob_sdk) as g:
        g.storage_command_oauth('show', 'get_blob_service_properties',
                                exception_handler=show_exception_handler)
        g.storage_command_oauth('update', generic_update=True, getter_name='get_blob_service_properties',
                                setter_type=get_custom_sdk('blob_azure_stack', cf_blob_data_gen_update),
                                setter_name='set_service_properties',
                                client_factory=cf_blob_data_gen_update)

    with self.command_group('storage blob', command_type=block_blob_sdk,
                            custom_command_type=get_custom_sdk('azcopy', blob_data_service_factory)) as g:
        g.storage_custom_command_oauth('sync', 'storage_blob_sync', is_preview=True)

    with self.command_group('storage container', command_type=block_blob_sdk,
                            custom_command_type=get_custom_sdk('blob_azure_stack',
                                                               blob_data_service_factory)) as g:
        from azure.cli.command_modules.storage._transformers_azure_stack import (
            transform_storage_list_output, transform_container_permission_output,
            transform_acl_list_output)
        from azure.cli.command_modules.storage._format import (transform_container_list,
                                                               transform_boolean_for_table,
                                                               transform_container_show)
        from ._validators_azure_stack import process_container_delete_parameters, validate_client_auth_parameter

        g.storage_command_oauth('list', 'list_containers', transform=transform_storage_list_output,
                                table_transformer=transform_container_list)
        g.storage_custom_command_oauth('delete', 'delete_container',
                                       validator=process_container_delete_parameters,
                                       transform=create_boolean_result_output_transformer('deleted'),
                                       table_transformer=transform_boolean_for_table)
        g.storage_command_oauth('show', 'get_container_properties',
                                table_transformer=transform_container_show,
                                exception_handler=show_exception_handler)
        g.storage_custom_command_oauth('create', 'create_container',
                                       validator=validate_client_auth_parameter, client_factory=None,
                                       transform=create_boolean_result_output_transformer('created'),
                                       table_transformer=transform_boolean_for_table)
        g.storage_custom_command_oauth('generate-sas', 'generate_container_shared_access_signature',
                                       min_api='2018-11-09')
        g.storage_command_oauth('generate-sas', 'generate_container_shared_access_signature',
                                max_api='2018-03-28')
        g.storage_command_oauth('exists', 'exists',
                                transform=create_boolean_result_output_transformer('exists'),
                                table_transformer=transform_boolean_for_table)
        g.storage_command_oauth('set-permission', 'set_container_acl')
        g.storage_command_oauth('show-permission', 'get_container_acl',
                                transform=transform_container_permission_output)
        g.storage_command_oauth('metadata update', 'set_container_metadata')
        g.storage_command_oauth('metadata show', 'get_container_metadata',
                                exception_handler=show_exception_handler)
        g.storage_command_oauth('lease acquire', 'acquire_container_lease')
        g.storage_command_oauth('lease renew', 'renew_container_lease')
        g.storage_command_oauth('lease release', 'release_container_lease')
        g.storage_command_oauth('lease change', 'change_container_lease')
        g.storage_command_oauth('lease break', 'break_container_lease')

    with self.command_group('storage container', command_type=block_blob_sdk,
                            custom_command_type=get_custom_sdk('acl', blob_data_service_factory)) as g:
        g.storage_custom_command_oauth('policy create', 'create_acl_policy')
        g.storage_custom_command_oauth('policy delete', 'delete_acl_policy')
        g.storage_custom_command_oauth('policy update', 'set_acl_policy', min_api='2017-04-17')
        g.storage_custom_command_oauth('policy show', 'get_acl_policy',
                                       exception_handler=show_exception_handler)
        g.storage_custom_command_oauth('policy list', 'list_acl_policies',
                                       table_transformer=transform_acl_list_output)

    blob_container_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#BlobContainersOperations.{}',
        client_factory=cf_blob_container_mgmt,
        resource_type=ResourceType.MGMT_STORAGE
    )

    with self.command_group('storage container immutability-policy', command_type=blob_container_mgmt_sdk,
                            custom_command_type=get_custom_sdk('blob', cf_blob_container_mgmt,
                                                               resource_type=ResourceType.MGMT_STORAGE),
                            min_api='2018-02-01') as g:
        from azure.cli.command_modules.storage._transformers import transform_immutability_policy
        g.show_command('show', 'get_immutability_policy', transform=transform_immutability_policy)
        g.custom_command('create', 'create_or_update_immutability_policy')
        g.command('delete', 'delete_immutability_policy', transform=lambda x: None)
        g.command('lock', 'lock_immutability_policy')
        g.custom_command('extend', 'extend_immutability_policy')

    with self.command_group('storage container legal-hold', command_type=blob_container_mgmt_sdk,
                            custom_command_type=get_custom_sdk('blob', cf_blob_container_mgmt,
                                                               resource_type=ResourceType.MGMT_STORAGE),
                            min_api='2018-02-01') as g:
        g.custom_command('set', 'set_legal_hold')
        g.custom_command('clear', 'clear_legal_hold')
        g.show_command('show', 'get', transform=lambda x: getattr(x, 'legal_hold', x))

    file_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storage.file.fileservice#FileService.{}',
        client_factory=file_data_service_factory,
        resource_type=ResourceType.DATA_STORAGE)

    with self.command_group('storage share-rm', command_type=file_shares_mgmt_sdk,
                            custom_command_type=get_custom_sdk('file_azure_stack', cf_mgmt_file_shares,
                                                               resource_type=ResourceType.MGMT_STORAGE),
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-04-01',
                            is_preview=True) as g:
        g.command('create', 'create')
        g.command('delete', 'delete', confirmation=True)
        g.custom_command('exists', '_file_share_exists',
                         transform=create_boolean_result_output_transformer('exists'))
        g.command('list', 'list')
        g.show_command('show', 'get')
        g.command('update', 'update')

    with self.command_group('storage share', command_type=file_sdk,
                            custom_command_type=get_custom_sdk('file_azure_stack',
                                                               file_data_service_factory)) as g:
        from ._format import (transform_share_list, transform_boolean_for_table)
        g.storage_command('list', 'list_shares', transform=transform_storage_list_output,
                          table_transformer=transform_share_list)
        g.storage_command('create', 'create_share',
                          transform=create_boolean_result_output_transformer('created'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('delete', 'delete_share',
                          transform=create_boolean_result_output_transformer('deleted'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('generate-sas', 'generate_share_shared_access_signature')
        g.storage_command('stats', 'get_share_stats')
        g.storage_command('show', 'get_share_properties', exception_handler=show_exception_handler)
        g.storage_command('update', 'set_share_properties')
        g.storage_command('snapshot', 'snapshot_share', min_api='2017-04-17')
        g.storage_command('exists', 'exists',
                          transform=create_boolean_result_output_transformer('exists'))
        g.storage_custom_command('url', 'create_share_url', transform=transform_url)
        g.storage_command('metadata show', 'get_share_metadata',
                          exception_handler=show_exception_handler)
        g.storage_command('metadata update', 'set_share_metadata')

    with self.command_group('storage share policy', command_type=file_sdk,
                            custom_command_type=get_custom_sdk('acl', file_data_service_factory)) as g:
        g.storage_custom_command('create', 'create_acl_policy')
        g.storage_custom_command('delete', 'delete_acl_policy')
        g.storage_custom_command('show', 'get_acl_policy', exception_handler=show_exception_handler)
        g.storage_custom_command('list', 'list_acl_policies',
                                 table_transformer=transform_acl_list_output)
        g.storage_custom_command('update', 'set_acl_policy')

    with self.command_group('storage directory', command_type=file_sdk,
                            custom_command_type=get_custom_sdk('directory_azure_stack',
                                                               file_data_service_factory)) as g:
        from ._format import transform_file_output
        from ._format_azure_stack import transform_file_directory_result
        g.storage_command('create', 'create_directory',
                          transform=create_boolean_result_output_transformer('created'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('delete', 'delete_directory',
                          transform=create_boolean_result_output_transformer('deleted'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('show', 'get_directory_properties', table_transformer=transform_file_output,
                          exception_handler=show_exception_handler)
        g.storage_command('exists', 'exists',
                          transform=create_boolean_result_output_transformer('exists'))
        g.storage_command('metadata show', 'get_directory_metadata',
                          exception_handler=show_exception_handler)
        g.storage_command('metadata update', 'set_directory_metadata')
        g.storage_custom_command('list', 'list_share_directories',
                                 transform=transform_file_directory_result(self.cli_ctx),
                                 table_transformer=transform_file_output,
                                 doc_string_source='file#FileService.list_directories_and_files')

    with self.command_group('storage file', command_type=file_sdk,
                            custom_command_type=get_custom_sdk('file_azure_stack',
                                                               file_data_service_factory)) as g:
        from ._format import transform_boolean_for_table, transform_file_output
        from ._transformers_azure_stack import transform_url
        g.storage_custom_command('list', 'list_share_files',
                                 transform=transform_file_directory_result(self.cli_ctx),
                                 table_transformer=transform_file_output,
                                 doc_string_source='file#FileService.list_directories_and_files')
        g.storage_command('delete', 'delete_file',
                          transform=create_boolean_result_output_transformer('deleted'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('resize', 'resize_file')
        g.storage_custom_command('url', 'create_file_url', transform=transform_url)
        g.storage_command('generate-sas', 'generate_file_shared_access_signature')
        g.storage_command('show', 'get_file_properties', table_transformer=transform_file_output,
                          exception_handler=show_exception_handler)
        g.storage_command('update', 'set_file_properties')
        g.storage_command('exists', 'exists',
                          transform=create_boolean_result_output_transformer('exists'))
        g.storage_command('download', 'get_file_to_path')
        g.storage_command('upload', 'create_file_from_path')
        g.storage_command('metadata show', 'get_file_metadata',
                          exception_handler=show_exception_handler)
        g.storage_command('metadata update', 'set_file_metadata')
        g.storage_command('copy start', 'copy_file')
        g.storage_command('copy cancel', 'abort_copy_file')
        g.storage_custom_command('upload-batch', 'storage_file_upload_batch')
        g.storage_custom_command('download-batch', 'storage_file_download_batch')
        g.storage_custom_command('delete-batch', 'storage_file_delete_batch')
        g.storage_custom_command('copy start-batch', 'storage_file_copy_batch')

    with self.command_group('storage cors', get_custom_sdk('cors_azure_stack',
                                                           multi_service_properties_factory)) as g:
        from ._transformers_azure_stack import transform_cors_list_output
        g.storage_command('add', 'add_cors')
        g.storage_command('clear', 'clear_cors')
        g.storage_command('list', 'list_cors', transform=transform_cors_list_output)

    queue_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storage.queue.queueservice#QueueService.{}',
        client_factory=queue_data_service_factory,
        resource_type=ResourceType.DATA_STORAGE)

    with self.command_group('storage queue', queue_sdk,
                            custom_command_type=get_custom_sdk('acl', queue_data_service_factory)) as g:
        from ._format import transform_boolean_for_table
        from ._transformers import create_boolean_result_output_transformer

        g.storage_command_oauth('list', 'list_queues', transform=transform_storage_list_output)
        g.storage_command_oauth('create', 'create_queue',
                                transform=create_boolean_result_output_transformer('created'),
                                table_transformer=transform_boolean_for_table)
        g.storage_command_oauth('delete', 'delete_queue',
                                transform=create_boolean_result_output_transformer('deleted'),
                                table_transformer=transform_boolean_for_table)
        g.storage_command_oauth('generate-sas', 'generate_queue_shared_access_signature')
        g.storage_command_oauth('stats', 'get_queue_service_stats', min_api='2016-05-31')
        g.storage_command_oauth('exists', 'exists',
                                transform=create_boolean_result_output_transformer('exists'))
        g.storage_command_oauth('metadata show', 'get_queue_metadata',
                                exception_handler=show_exception_handler)
        g.storage_command_oauth('metadata update', 'set_queue_metadata')
        g.storage_custom_command_oauth('policy create', 'create_acl_policy')
        g.storage_custom_command_oauth('policy delete', 'delete_acl_policy')
        g.storage_custom_command_oauth('policy show', 'get_acl_policy',
                                       exception_handler=show_exception_handler)
        g.storage_custom_command_oauth('policy list', 'list_acl_policies',
                                       table_transformer=transform_acl_list_output)
        g.storage_custom_command_oauth('policy update', 'set_acl_policy')

    with self.command_group('storage message', queue_sdk) as g:
        from ._transformers import create_boolean_result_output_transformer
        from ._format import transform_message_show

        g.storage_command_oauth('put', 'put_message')
        g.storage_command_oauth('get', 'get_messages', table_transformer=transform_message_show)
        g.storage_command_oauth('peek', 'peek_messages', table_transformer=transform_message_show)
        g.storage_command_oauth('delete', 'delete_message',
                                transform=create_boolean_result_output_transformer('deleted'),
                                table_transformer=transform_boolean_for_table)
        g.storage_command_oauth('clear', 'clear_messages')
        g.storage_command_oauth('update', 'update_message')

    if cosmosdb_table_exists(self.cli_ctx):
        table_sdk = CliCommandType(
            operations_tmpl='azure.multiapi.cosmosdb.table.tableservice#TableService.{}',
            client_factory=table_data_service_factory,
            resource_type=ResourceType.DATA_COSMOS_TABLE)
    else:
        table_sdk = CliCommandType(
            operations_tmpl='azure.multiapi.storage.table.tableservice#TableService.{}',
            client_factory=table_data_service_factory,
            resource_type=ResourceType.DATA_COSMOS_TABLE)

    with self.command_group('storage table', table_sdk,
                            custom_command_type=get_custom_sdk('acl', table_data_service_factory)) as g:
        from ._format import transform_boolean_for_table
        from ._transformers import create_boolean_result_output_transformer

        g.storage_command('create', 'create_table',
                          transform=create_boolean_result_output_transformer('created'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('delete', 'delete_table',
                          transform=create_boolean_result_output_transformer('deleted'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('exists', 'exists',
                          transform=create_boolean_result_output_transformer('exists'))
        g.storage_command('generate-sas', 'generate_table_shared_access_signature')
        g.storage_command('list', 'list_tables', transform=transform_storage_list_output)
        g.storage_command('stats', 'get_table_service_stats', min_api='2016-05-31')
        g.storage_custom_command('policy create', 'create_acl_policy')
        g.storage_custom_command('policy delete', 'delete_acl_policy')
        g.storage_custom_command('policy show', 'get_acl_policy',
                                 exception_handler=show_exception_handler)
        g.storage_custom_command('policy list', 'list_acl_policies',
                                 table_transformer=transform_acl_list_output)
        g.storage_custom_command('policy update', 'set_acl_policy')

    with self.command_group('storage entity', table_sdk,
                            custom_command_type=get_custom_sdk('table', table_data_service_factory)) as g:
        from ._format import transform_boolean_for_table, transform_entity_show
        from ._transformers import (create_boolean_result_output_transformer,
                                    transform_entity_query_output,
                                    transform_entities_result, transform_entity_result)

        g.storage_command('query', 'query_entities', table_transformer=transform_entity_query_output,
                          transform=transform_entities_result)
        g.storage_command('replace', 'update_entity')
        g.storage_command('merge', 'merge_entity')
        g.storage_command('delete', 'delete_entity',
                          transform=create_boolean_result_output_transformer('deleted'),
                          table_transformer=transform_boolean_for_table)
        g.storage_command('show', 'get_entity', table_transformer=transform_entity_show,
                          exception_handler=show_exception_handler, transform=transform_entity_result)
        g.storage_custom_command('insert', 'insert_table_entity')
Returns a CliCommandType instance with specified operation template based on the given custom module name. This is useful when the command is not defined in the default 'custom' module but instead in a module under 'operations' package.
load_command_table
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/commands_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/commands_azure_stack.py
MIT
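A recurring detail in the command table above is the transform=lambda x: getattr(x, 'keys', x) idiom on the key commands: it unwraps the keys attribute when the SDK returns a result object, but passes anything else through untouched. In isolation (KeysResult is a hypothetical stand-in for the SDK response type):

class KeysResult:                      # stand-in for the SDK's list-keys response
    keys = ['key1', 'key2']

unwrap = lambda x: getattr(x, 'keys', x)
print(unwrap(KeysResult()))            # ['key1', 'key2']
print(unwrap(['already', 'plain']))    # passed through unchanged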
def get_custom_sdk(custom_module, client_factory, resource_type=ResourceType.DATA_STORAGE):
    """Returns a CliCommandType instance with specified operation template based on the given custom
    module name. This is useful when the command is not defined in the default 'custom' module but
    instead in a module under 'operations' package."""
    return CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.{}#'.format(
            custom_module) + '{}',
        client_factory=client_factory,
        resource_type=resource_type
    )
Returns a CliCommandType instance with specified operation template based on the given custom module name. This is useful when the command is not defined in the default 'custom' module but instead in a module under 'operations' package.
load_command_table.get_custom_sdk
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/commands.py
MIT
def load_command_table(self, _):  # pylint: disable=too-many-locals, too-many-statements
    storage_account_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#StorageAccountsOperations.{}',
        client_factory=cf_sa,
        resource_type=ResourceType.MGMT_STORAGE
    )

    blob_service_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#BlobServicesOperations.{}',
        client_factory=cf_mgmt_blob_services,
        resource_type=ResourceType.MGMT_STORAGE
    )

    file_service_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#FileServicesOperations.{}',
        client_factory=cf_mgmt_file_services,
        resource_type=ResourceType.MGMT_STORAGE
    )

    file_shares_mgmt_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#FileSharesOperations.{}',
        client_factory=cf_mgmt_file_shares,
        resource_type=ResourceType.MGMT_STORAGE
    )

    storage_account_sdk_keys = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#StorageAccountsOperations.{}',
        client_factory=cf_sa_for_keys,
        resource_type=ResourceType.MGMT_STORAGE
    )

    private_link_resource_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#PrivateLinkResourcesOperations.{}',
        client_factory=cf_private_link,
        resource_type=ResourceType.MGMT_STORAGE
    )

    private_endpoint_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#PrivateEndpointConnectionsOperations.{}',
        client_factory=cf_private_endpoint,
        resource_type=ResourceType.MGMT_STORAGE
    )

    private_endpoint_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_private_endpoint,
        resource_type=ResourceType.MGMT_STORAGE)

    storage_account_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_sa)

    block_blob_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storage.blob.blockblobservice#BlockBlobService.{}',
        client_factory=blob_data_service_factory,
        resource_type=ResourceType.DATA_STORAGE)

    def get_custom_sdk(custom_module, client_factory, resource_type=ResourceType.DATA_STORAGE):
        """Returns a CliCommandType instance with specified operation template based on the given
        custom module name. This is useful when the command is not defined in the default 'custom'
        module but instead in a module under 'operations' package."""
        return CliCommandType(
            operations_tmpl='azure.cli.command_modules.storage.operations.{}#'.format(
                custom_module) + '{}',
            client_factory=client_factory,
            resource_type=resource_type
        )

    with self.command_group('storage', command_type=block_blob_sdk,
                            custom_command_type=get_custom_sdk('azcopy', blob_data_service_factory)) as g:
        g.storage_custom_command_oauth('remove', 'storage_remove')

    with self.command_group('storage', custom_command_type=get_custom_sdk('azcopy', None)) as g:
        from ._validators import validate_azcopy_credential
        g.storage_custom_command_oauth('copy', 'storage_copy', validator=validate_azcopy_credential)

    with self.command_group('storage account', storage_account_sdk, resource_type=ResourceType.MGMT_STORAGE,
                            custom_command_type=storage_account_custom_type) as g:
        g.custom_command('check-name', 'check_name_availability')
        g.custom_command('create', 'create_storage_account')
        g.command('delete', 'delete', confirmation=True)
        g.show_command('show', 'get_properties')
        g.custom_command('list', 'list_storage_accounts')
        g.custom_command('show-usage', 'show_storage_account_usage', min_api='2018-02-01')
        g.custom_command('show-usage', 'show_storage_account_usage_no_location', max_api='2017-10-01')
        g.custom_command('show-connection-string', 'show_storage_account_connection_string')
        g.generic_update_command('update', getter_name='get_properties', setter_name='update',
                                 custom_func_name='update_storage_account', min_api='2016-12-01')
        g.custom_command('failover', 'begin_failover', supports_no_wait=True, is_preview=True,
                         min_api='2018-07-01')
        g.command('hns-migration start', 'begin_hierarchical_namespace_migration',
                  supports_no_wait=True, min_api='2021-06-01')
        g.command('hns-migration stop', 'begin_abort_hierarchical_namespace_migration',
                  supports_no_wait=True, min_api='2021-06-01')

    with self.command_group('storage account', storage_account_sdk_keys,
                            resource_type=ResourceType.MGMT_STORAGE,
                            custom_command_type=storage_account_custom_type) as g:
        g.custom_command('keys renew', 'regenerate_key', transform=lambda x: getattr(x, 'keys', x))
        g.command('keys list', 'list_keys', transform=lambda x: getattr(x, 'keys', x))
        g.command('revoke-delegation-keys', 'revoke_user_delegation_keys', min_api='2019-04-01')

    account_blob_service_custom_sdk = get_custom_sdk('account', client_factory=cf_blob_service,
                                                     resource_type=ResourceType.DATA_STORAGE_BLOB)
    with self.command_group('storage account', resource_type=ResourceType.DATA_STORAGE_BLOB,
                            custom_command_type=account_blob_service_custom_sdk) as g:
        g.storage_custom_command('generate-sas', 'generate_sas')

    blob_inventory_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#BlobInventoryPoliciesOperations.{}',
        client_factory=cf_sa_blob_inventory,
        resource_type=ResourceType.MGMT_STORAGE
    )

    blob_inventory_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_sa_blob_inventory,
        resource_type=ResourceType.MGMT_STORAGE
    )

    with self.command_group('storage account blob-inventory-policy', blob_inventory_sdk,
                            custom_command_type=blob_inventory_custom_type, is_preview=True,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview') as g:
        g.custom_command('create', 'create_blob_inventory_policy')
        g.generic_update_command('update', getter_name='get_blob_inventory_policy',
                                 getter_type=blob_inventory_custom_type,
                                 setter_name='update_blob_inventory_policy',
                                 setter_type=blob_inventory_custom_type)
        g.custom_command('delete', 'delete_blob_inventory_policy', confirmation=True)
        g.custom_show_command('show', 'get_blob_inventory_policy')

    encryption_scope_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#EncryptionScopesOperations.{}',
        client_factory=cf_mgmt_encryption_scope,
        resource_type=ResourceType.MGMT_STORAGE
    )

    encryption_scope_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_mgmt_encryption_scope,
        resource_type=ResourceType.MGMT_STORAGE
    )

    with self.command_group('storage account encryption-scope', encryption_scope_sdk,
                            custom_command_type=encryption_scope_custom_type,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01') as g:
        g.custom_command('create', 'create_encryption_scope')
        g.show_command('show', 'get')
        g.custom_command('list', 'list_encryption_scope')
        g.custom_command('update', 'update_encryption_scope')

    management_policy_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#ManagementPoliciesOperations.{}',
        client_factory=cf_mgmt_policy,
        resource_type=ResourceType.MGMT_STORAGE
    )

    management_policy_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_mgmt_policy)

    storage_blob_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.blob#{}',
        client_factory=cf_sa,
        resource_type=ResourceType.MGMT_STORAGE)

    with self.command_group('storage account management-policy', management_policy_sdk,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2018-11-01',
                            custom_command_type=management_policy_custom_type) as g:
        g.custom_show_command('show', 'get_management_policy')
        g.custom_command('create', 'create_management_policies')
        g.generic_update_command('update', getter_name='get_management_policy',
                                 getter_type=management_policy_custom_type,
                                 setter_name='update_management_policies',
                                 setter_type=management_policy_custom_type)
        g.custom_command('delete', 'delete_management_policy')

    with self.command_group('storage account network-rule', storage_account_sdk,
                            custom_command_type=storage_account_custom_type,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01') as g:
        g.custom_command('add', 'add_network_rule')
        g.custom_command('list', 'list_network_rules')
        g.custom_command('remove', 'remove_network_rule')

    or_policy_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#ObjectReplicationPoliciesOperations.{}',
        client_factory=cf_or_policy,
        resource_type=ResourceType.MGMT_STORAGE)

    or_policy_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_or_policy)

    with self.command_group('storage account or-policy', or_policy_sdk, is_preview=True,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01',
                            custom_command_type=or_policy_custom_type) as g:
        g.show_command('show', 'get')
        g.command('list', 'list')
        g.custom_command('create', 'create_or_policy')
        g.generic_update_command('update', setter_name='update_or_policy',
                                 setter_type=or_policy_custom_type)
        g.command('delete', 'delete')

    with self.command_group('storage account or-policy rule', or_policy_sdk, is_preview=True,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01',
                            custom_command_type=or_policy_custom_type) as g:
        g.custom_show_command('show', 'get_or_rule')
        g.custom_command('list', 'list_or_rules')
        g.custom_command('add', 'add_or_rule')
        g.custom_command('update', 'update_or_rule')
        g.custom_command('remove', 'remove_or_rule')

    with self.command_group('storage account private-endpoint-connection', private_endpoint_sdk,
                            custom_command_type=private_endpoint_custom_type, is_preview=True,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01') as g:
        from ._validators import validate_private_endpoint_connection_id
        g.command('delete', 'delete', confirmation=True, validator=validate_private_endpoint_connection_id)
        g.show_command('show', 'get', validator=validate_private_endpoint_connection_id)
        g.custom_command('approve', 'approve_private_endpoint_connection',
                         validator=validate_private_endpoint_connection_id)
        g.custom_command('reject', 'reject_private_endpoint_connection',
                         validator=validate_private_endpoint_connection_id)

    with self.command_group('storage account private-link-resource', private_link_resource_sdk,
                            resource_type=ResourceType.MGMT_STORAGE) as g:
        from azure.cli.core.commands.transform import gen_dict_to_list_transform
        g.command('list', 'list_by_storage_account', is_preview=True, min_api='2019-06-01',
                  transform=gen_dict_to_list_transform(key="value"))

    with self.command_group('storage account blob-service-properties', blob_service_mgmt_sdk,
                            custom_command_type=storage_account_custom_type,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2018-07-01') as g:
        from ._transformers import transform_restore_policy_output
        g.show_command('show', 'get_service_properties', transform=transform_restore_policy_output)
        g.generic_update_command('update', getter_name='get_service_properties',
                                 setter_name='set_service_properties',
                                 custom_func_name='update_blob_service_properties',
                                 transform=transform_restore_policy_output)

    with self.command_group('storage account blob-service-properties cors-rule', blob_service_mgmt_sdk,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2022-09-01',
                            custom_command_type=get_custom_sdk('account',
                                                               client_factory=cf_mgmt_blob_services,
                                                               resource_type=ResourceType.MGMT_STORAGE)) as g:
        g.custom_command('list', 'list_blob_cors_rules')
        g.custom_command('add', 'add_blob_cors_rule')
        g.custom_command('clear', 'clear_blob_cors_rules')

    with self.command_group('storage account file-service-properties', file_service_mgmt_sdk,
                            custom_command_type=get_custom_sdk('account',
                                                               client_factory=cf_mgmt_file_services,
                                                               resource_type=ResourceType.MGMT_STORAGE),
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2019-06-01') as g:
        g.show_command('show', 'get_service_properties')
        g.generic_update_command('update', getter_name='get_service_properties',
                                 setter_name='set_service_properties',
                                 custom_func_name='update_file_service_properties')

    local_users_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.storage.operations#'
                        'LocalUsersOperations.{}',
        client_factory=cf_local_users,
        resource_type=ResourceType.MGMT_STORAGE
    )

    local_users_custom_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.account#{}',
        client_factory=cf_local_users,
        resource_type=ResourceType.MGMT_STORAGE
    )

    with self.command_group('storage account local-user', local_users_sdk,
                            custom_command_type=local_users_custom_type,
                            resource_type=ResourceType.MGMT_STORAGE, min_api='2021-08-01') as g:
        g.custom_command('create', 'create_local_user')
        g.custom_command('update', 'update_local_user')
        g.command('delete', 'delete')
        g.command('list', 'list')
        g.show_command('show', 'get')
        g.command('list-keys', 'list_keys')
        g.command('regenerate-password', 'regenerate_password')

    with self.command_group('storage logging', get_custom_sdk('logging', multi_service_properties_factory)) as g:
        from ._transformers import transform_logging_list_output
        g.storage_command('update', 'set_logging')
        g.storage_command('show', 'get_logging', table_transformer=transform_logging_list_output,
                          exception_handler=show_exception_handler)
        g.storage_command('off', 'disable_logging', is_preview=True)

    with self.command_group('storage metrics', get_custom_sdk('metrics', multi_service_properties_factory)) as g:
        from ._transformers import transform_metrics_list_output
        g.storage_command('update', 'set_metrics')
        g.storage_command('show', 'get_metrics', table_transformer=transform_metrics_list_output,
                          exception_handler=show_exception_handler)

    blob_client_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storagev2.blob._blob_client#BlobClient.{}',
        client_factory=cf_blob_client,
        resource_type=ResourceType.DATA_STORAGE_BLOB
    )

    with self.command_group('storage blob', blob_client_sdk, resource_type=ResourceType.DATA_STORAGE_BLOB,
                            min_api='2019-02-02',
                            custom_command_type=get_custom_sdk('blob', client_factory=cf_blob_client,
                                                               resource_type=ResourceType.DATA_STORAGE_BLOB)) as g:
        from ._transformers import (transform_blob_list_output, transform_blob_json_output,
                                    transform_blob_upload_output, transform_url_without_encode,
                                    create_boolean_result_output_transformer)
        from ._format import transform_blob_output, transform_boolean_for_table
        from ._exception_handler import file_related_exception_handler
        from ._validators import (process_blob_upload_batch_parameters,
                                  process_blob_download_batch_parameters,
                                  process_blob_delete_batch_parameters)

        g.storage_custom_command_oauth('copy start', 'copy_blob')
        g.storage_command_oauth('copy cancel', 'abort_copy')
        g.storage_custom_command_oauth('show', 'show_blob_v2', transform=transform_blob_json_output,
                                       table_transformer=transform_blob_output,
                                       exception_handler=show_exception_handler)
        g.storage_custom_command_oauth('set-tier', 'set_blob_tier_v2')
        g.storage_custom_command_oauth('list', 'list_blobs', client_factory=cf_container_client,
                                       transform=transform_blob_list_output,
                                       table_transformer=transform_blob_output)
        g.storage_custom_command_oauth('query', 'query_blob', is_preview=True, min_api='2019-12-12')
        g.storage_custom_command_oauth('rewrite', 'rewrite_blob', is_preview=True, min_api='2020-04-08')
        g.storage_command_oauth('set-legal-hold', 'set_legal_hold', min_api='2020-10-02')
        g.storage_custom_command_oauth('immutability-policy set', 'set_immutability_policy',
                                       min_api='2020-10-02')
        g.storage_command_oauth('immutability-policy delete', 'delete_immutability_policy',
                                min_api='2020-10-02')
        g.storage_custom_command_oauth('upload', 'upload_blob', transform=transform_blob_upload_output,
                                       exception_handler=file_related_exception_handler)
        g.storage_custom_command_oauth('upload-batch', 'storage_blob_upload_batch',
                                       client_factory=cf_blob_service,
                                       validator=process_blob_upload_batch_parameters,
                                       exception_handler=file_related_exception_handler)
        g.storage_custom_command_oauth('download', 'download_blob', transform=transform_blob_json_output,
                                       table_transformer=transform_blob_output,
                                       exception_handler=file_related_exception_handler)
        g.storage_custom_command_oauth('download-batch', 'storage_blob_download_batch',
                                       client_factory=cf_blob_service,
                                       validator=process_blob_download_batch_parameters,
                                       exception_handler=file_related_exception_handler)
        g.storage_custom_command_oauth('url', 'create_blob_url', client_factory=cf_blob_service,
                                       transform=transform_url_without_encode)
        g.storage_command_oauth('metadata show', 'get_blob_properties',
                                exception_handler=show_exception_handler,
                                transform=lambda x: x.metadata)
        g.storage_command_oauth('metadata update', 'set_blob_metadata')
        g.storage_command_oauth('snapshot', 'create_snapshot')
        g.storage_command_oauth('update', 'set_http_headers')
        g.storage_command_oauth('exists', 'exists',
                                transform=create_boolean_result_output_transformer('exists'))
        g.storage_command_oauth('delete', 'delete_blob')
        g.storage_command_oauth('undelete', 'undelete_blob',
                                transform=create_boolean_result_output_transformer('undeleted'),
                                table_transformer=transform_boolean_for_table)
        g.storage_custom_command_oauth('delete-batch', 'storage_blob_delete_batch',
                                       client_factory=cf_blob_service,
                                       validator=process_blob_delete_batch_parameters)

    blob_service_custom_sdk = get_custom_sdk('blob', client_factory=cf_blob_service,
                                             resource_type=ResourceType.DATA_STORAGE_BLOB)
    with self.command_group('storage blob', resource_type=ResourceType.DATA_STORAGE_BLOB,
                            custom_command_type=blob_service_custom_sdk) as g:
        g.storage_custom_command_oauth('generate-sas', 'generate_sas_blob_uri')

    blob_service_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storagev2.blob._blob_service_client#BlobServiceClient.{}',
        client_factory=cf_blob_service,
        resource_type=ResourceType.DATA_STORAGE_BLOB
    )

    with self.command_group('storage blob', blob_service_sdk, resource_type=ResourceType.DATA_STORAGE_BLOB,
                            custom_command_type=blob_service_custom_sdk) as g:
        from ._transformers import transform_blob_json_output_track2
        g.storage_custom_command_oauth('copy start-batch', 'storage_blob_copy_batch')
        g.storage_command_oauth('service-properties show', 'get_service_properties',
                                transform=transform_blob_json_output_track2,
                                exception_handler=show_exception_handler)
        g.storage_command_oauth('service-properties update', generic_update=True,
                                getter_name='get_service_properties',
                                setter_type=get_custom_sdk('blob', cf_blob_service),
                                setter_name='set_service_properties_track2',
                                custom_func_name='transform_blob_generic_output',
                                client_factory=lambda cli_ctx, kwargs: cf_blob_service(cli_ctx, kwargs.copy()),
                                transform=transform_blob_json_output_track2)
        g.storage_command_oauth('service-properties delete-policy show', 'get_service_properties',
                                transform=lambda x: x['delete_retention_policy'],
                                exception_handler=show_exception_handler)
        g.storage_custom_command_oauth('service-properties delete-policy update', 'set_delete_policy')

    blob_lease_client_sdk = CliCommandType(
        operations_tmpl='azure.multiapi.storagev2.blob._lease#BlobLeaseClient.{}',
        client_factory=cf_blob_lease_client,
        resource_type=ResourceType.DATA_STORAGE_BLOB
    )

    with self.command_group('storage blob lease', blob_lease_client_sdk,
                            resource_type=ResourceType.DATA_STORAGE_BLOB, min_api='2019-02-02',
                            custom_command_type=get_custom_sdk('blob', client_factory=cf_blob_lease_client,
                                                               resource_type=ResourceType.DATA_STORAGE_BLOB)) as g:
        g.storage_custom_command_oauth('acquire', 'acquire_blob_lease')
        g.storage_command_oauth('break', 'break_lease')
        g.storage_command_oauth('change', 'change')
        g.storage_custom_command_oauth('renew', 'renew_blob_lease')
        g.storage_command_oauth('release', 'release')

    with self.command_group('storage blob', storage_account_sdk, resource_type=ResourceType.MGMT_STORAGE,
                            custom_command_type=storage_blob_custom_type) as g:
        g.custom_command('restore', 'restore_blob_ranges', min_api='2019-06-01', supports_no_wait=True)

    with self.command_group('storage blob incremental-copy',
                            operations_tmpl='azure.multiapi.storage.blob.pageblobservice#PageBlobService.{}',
                            client_factory=page_blob_service_factory,
                            resource_type=ResourceType.DATA_STORAGE, min_api='2016-05-31') as g:
g.storage_command_oauth('start', 'incremental_copy_blob') with self.command_group('storage blob incremental-copy', operations_tmpl='azure.multiapi.storage.blob.blockblobservice#BlockBlobService.{}', client_factory=page_blob_service_factory, resource_type=ResourceType.DATA_STORAGE, min_api='2016-05-31') as g: g.storage_command_oauth('cancel', 'abort_copy_blob') with self.command_group('storage blob', command_type=block_blob_sdk, custom_command_type=get_custom_sdk('azcopy', blob_data_service_factory)) as g: g.storage_custom_command_oauth('sync', 'storage_blob_sync', is_preview=True) container_client_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.blob._container_client#' 'ContainerClient.{}', client_factory=cf_container_client, resource_type=ResourceType.DATA_STORAGE_BLOB ) with self.command_group('storage container', command_type=container_client_sdk, resource_type=ResourceType.DATA_STORAGE_BLOB, custom_command_type=get_custom_sdk('blob', client_factory=cf_container_client, resource_type=ResourceType.DATA_STORAGE_BLOB)) as g: from ._transformers import transform_container_json_output, transform_container_permission_output from azure.cli.command_modules.storage._format import transform_container_show g.storage_command_oauth('show', 'get_container_properties', transform=transform_container_json_output, table_transformer=transform_container_show, exception_handler=show_exception_handler) g.storage_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists'), table_transformer=transform_boolean_for_table) g.storage_custom_command('set-permission', 'set_container_permission') g.storage_command('show-permission', 'get_container_access_policy', transform=transform_container_permission_output) g.storage_command_oauth('metadata update', 'set_container_metadata') g.storage_command_oauth('metadata show', 'get_container_properties', exception_handler=show_exception_handler, transform=lambda x: getattr(x, 'metadata', x)) container_lease_client_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.blob._lease#BlobLeaseClient.{}', client_factory=cf_container_lease_client, resource_type=ResourceType.DATA_STORAGE_BLOB ) with self.command_group('storage container lease', container_lease_client_sdk, resource_type=ResourceType.DATA_STORAGE_BLOB, custom_command_type=get_custom_sdk('blob', client_factory=cf_container_lease_client, resource_type=ResourceType.DATA_STORAGE_BLOB)) as g: g.storage_custom_command_oauth('acquire', 'acquire_blob_lease') g.storage_custom_command_oauth('renew', 'renew_blob_lease') g.storage_command_oauth('break', 'break_lease') g.storage_command_oauth('change', 'change') g.storage_command_oauth('release', 'release') with self.command_group('storage container', command_type=blob_service_sdk, custom_command_type=blob_service_custom_sdk, resource_type=ResourceType.DATA_STORAGE_BLOB) as g: from ._transformers import transform_container_list_output from azure.cli.command_modules.storage._format import transform_container_list from ._validators import process_container_delete_parameters g.storage_custom_command_oauth('create', 'create_container', transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('delete', 'delete_container', validator=process_container_delete_parameters, transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('list', 'list_containers', 
min_api='2019-02-02', transform=transform_container_list_output, table_transformer=transform_container_list) g.storage_custom_command_oauth('generate-sas', 'generate_container_shared_access_signature') g.storage_command_oauth('restore', 'undelete_container', min_api='2020-02-10') with self.command_group('storage container policy', resource_type=ResourceType.DATA_STORAGE_BLOB, custom_command_type=get_custom_sdk('container_access_policy', client_factory=cf_container_client, resource_type=ResourceType.DATA_STORAGE_BLOB)) as g: from ._transformers import transform_acl_list_output, transform_acl_edit, transform_acl_datetime g.storage_custom_command('create', 'create_acl_policy', transform=transform_acl_edit) g.storage_custom_command('delete', 'delete_acl_policy', transform=transform_acl_edit) g.storage_custom_command( 'update', 'set_acl_policy', transform=transform_acl_edit) g.storage_custom_command( 'show', 'get_acl_policy', transform=transform_acl_datetime, exception_handler=show_exception_handler) g.storage_custom_command( 'list', 'list_acl_policies', table_transformer=transform_acl_list_output) blob_container_mgmt_sdk = CliCommandType( operations_tmpl='azure.mgmt.storage.operations#BlobContainersOperations.{}', client_factory=cf_blob_container_mgmt, resource_type=ResourceType.MGMT_STORAGE ) with self.command_group('storage container immutability-policy', command_type=blob_container_mgmt_sdk, custom_command_type=get_custom_sdk('blob', cf_blob_container_mgmt, resource_type=ResourceType.MGMT_STORAGE), min_api='2018-02-01') as g: from azure.cli.command_modules.storage._transformers import transform_immutability_policy g.show_command('show', 'get_immutability_policy', transform=transform_immutability_policy) g.custom_command('create', 'create_or_update_immutability_policy') g.command('delete', 'delete_immutability_policy', transform=lambda x: None) g.command('lock', 'lock_immutability_policy') g.custom_command('extend', 'extend_immutability_policy') with self.command_group('storage container legal-hold', command_type=blob_container_mgmt_sdk, custom_command_type=get_custom_sdk('blob', cf_blob_container_mgmt, resource_type=ResourceType.MGMT_STORAGE), min_api='2018-02-01') as g: g.custom_command('set', 'set_legal_hold') g.custom_command('clear', 'clear_legal_hold') g.show_command( 'show', 'get', transform=lambda x: getattr(x, 'legal_hold', x)) with self.command_group('storage container-rm', command_type=blob_container_mgmt_sdk, custom_command_type=get_custom_sdk('blob', cf_blob_container_mgmt, resource_type=ResourceType.MGMT_STORAGE), resource_type=ResourceType.MGMT_STORAGE, min_api='2018-02-01') as g: g.custom_command('create', 'create_container_rm') g.command('delete', 'delete', confirmation=True) g.generic_update_command('update', setter_name='update', max_api='2019-04-01') g.generic_update_command('update', setter_name='update', setter_arg_name='blob_container', custom_func_name='update_container_rm', min_api='2019-06-01') g.custom_command('list', 'list_container_rm') g.custom_command('exists', 'container_rm_exists', transform=create_boolean_result_output_transformer('exists'), table_transformer=transform_boolean_for_table) g.show_command('show', 'get') g.command('migrate-vlw', 'begin_object_level_worm', supports_no_wait=True, is_preview=True) share_client_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.fileshare._share_client#ShareClient.{}', client_factory=cf_share_client, resource_type=ResourceType.DATA_STORAGE_FILESHARE) share_service_sdk = CliCommandType( 
operations_tmpl='azure.multiapi.storagev2.fileshare._share_service_client#ShareServiceClient.{}', client_factory=cf_share_service, resource_type=ResourceType.DATA_STORAGE_FILESHARE) directory_client_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.fileshare._directory_client#ShareDirectoryClient.{}', client_factory=cf_share_directory_client, resource_type=ResourceType.DATA_STORAGE_FILESHARE) file_client_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.fileshare._file_client#ShareFileClient.{}', client_factory=cf_share_file_client, resource_type=ResourceType.DATA_STORAGE_FILESHARE) with self.command_group('storage share-rm', command_type=file_shares_mgmt_sdk, custom_command_type=get_custom_sdk('file', cf_mgmt_file_shares, resource_type=ResourceType.MGMT_STORAGE), resource_type=ResourceType.MGMT_STORAGE, min_api='2019-04-01') as g: from ._transformers import transform_share_rm_output g.custom_command('exists', '_file_share_exists', transform=create_boolean_result_output_transformer('exists')) g.custom_command('stats', 'get_stats', transform=lambda x: getattr(x, 'share_usage_bytes')) g.custom_command('restore', 'restore_share_rm') g.custom_command('snapshot', 'snapshot_share_rm', min_api='2020-08-01-preview', is_preview=True, transform=transform_share_rm_output) with self.command_group('storage share-rm'): from .operations.file import ShareRmCreate, ShareRmUpdate, ShareRmDelete, ShareRmShow, ShareRmList self.command_table['storage share-rm create'] = ShareRmCreate(loader=self) self.command_table['storage share-rm update'] = ShareRmUpdate(loader=self) self.command_table['storage share-rm delete'] = ShareRmDelete(loader=self) self.command_table['storage share-rm show'] = ShareRmShow(loader=self) self.command_table['storage share-rm list'] = ShareRmList(loader=self) with self.command_group('storage share', command_type=share_client_sdk, custom_command_type=get_custom_sdk('fileshare', cf_share_client, ResourceType.DATA_STORAGE_FILESHARE), resource_type=ResourceType.DATA_STORAGE_FILESHARE, min_api='2019-02-02') as g: from ._format import transform_boolean_for_table from ._transformers import (transform_file_share_json_output, transform_share_list_handle) g.storage_custom_command('create', 'create_share', transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table) g.storage_custom_command('delete', 'delete_share', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_custom_command('generate-sas', 'generate_share_sas') g.storage_custom_command('stats', 'get_share_stats') g.storage_custom_command('snapshot', 'create_snapshot') g.storage_command('show', 'get_share_properties', exception_handler=show_exception_handler, transform=transform_file_share_json_output) g.storage_custom_command('exists', 'share_exists', transform=create_boolean_result_output_transformer('exists')) g.storage_command('update', 'set_share_quota') g.storage_command('metadata show', 'get_share_properties', exception_handler=show_exception_handler, transform=lambda x: getattr(x, 'metadata', x)) g.storage_custom_command('metadata update', 'set_share_metadata', transform=create_boolean_result_output_transformer('updated')) g.storage_custom_command_oauth('list-handle', 'list_handle', transform=transform_share_list_handle) g.storage_custom_command_oauth('close-handle', 'close_handle') with self.command_group('storage share', command_type=share_service_sdk, 
custom_command_type=get_custom_sdk('fileshare', cf_share_service, ResourceType.DATA_STORAGE_FILESHARE), resource_type=ResourceType.DATA_STORAGE_FILESHARE, min_api='2019-02-02') as g: from ._transformers import transform_storage_list_output from ._format import transform_share_list from ._transformers import transform_url_without_encode g.storage_custom_command('list', 'list_shares', transform=transform_storage_list_output, table_transformer=transform_share_list) g.storage_custom_command('url', 'create_share_url', transform=transform_url_without_encode) with self.command_group('storage share policy', custom_command_type=get_custom_sdk('access_policy', cf_share_client, ResourceType.DATA_STORAGE_FILESHARE), resource_type=ResourceType.DATA_STORAGE_FILESHARE, min_api='2019-02-02') as g: from ._transformers import transform_acl_list_output, transform_acl_edit, transform_acl_datetime g.storage_custom_command('create', 'create_acl_policy', transform=transform_acl_edit) g.storage_custom_command('delete', 'delete_acl_policy', transform=transform_acl_edit) g.storage_custom_command( 'show', 'get_acl_policy', exception_handler=show_exception_handler, transform=transform_acl_datetime) g.storage_custom_command( 'list', 'list_acl_policies', table_transformer=transform_acl_list_output) g.storage_custom_command('update', 'set_acl_policy', transform=transform_acl_edit) with self.command_group('storage directory', command_type=directory_client_sdk, resource_type=ResourceType.DATA_STORAGE_FILESHARE, custom_command_type=get_custom_sdk('directory', cf_share_directory_client)) as g: from ._transformers import transform_share_directory_json_output from ._format import transform_file_directory_result, transform_file_output g.storage_custom_command_oauth('create', 'create_directory', transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('delete', 'delete_directory', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('show', 'get_directory_properties', transform=transform_share_directory_json_output, table_transformer=transform_file_output, exception_handler=show_exception_handler) g.storage_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists')) g.storage_command_oauth('metadata show', 'get_directory_properties', exception_handler=show_exception_handler, transform=lambda x: getattr(x, 'metadata', x)) g.storage_command_oauth('metadata update', 'set_directory_metadata') g.storage_custom_command_oauth('list', 'list_share_directories', transform=transform_file_directory_result, table_transformer=transform_file_output) with self.command_group('storage file', command_type=file_client_sdk, resource_type=ResourceType.DATA_STORAGE_FILESHARE, custom_command_type=get_custom_sdk('file', cf_share_file_client)) as g: from ._transformers import transform_file_show_result from ._format import transform_metadata_show, transform_boolean_for_table, transform_file_output from ._exception_handler import file_related_exception_handler g.storage_custom_command_oauth('list', 'list_share_files', client_factory=cf_share_client, transform=transform_file_directory_result, table_transformer=transform_file_output) g.storage_command_oauth('delete', 'delete_file', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('delete-batch', 
'storage_file_delete_batch', client_factory=cf_share_client) g.storage_command_oauth('resize', 'resize_file') g.storage_custom_command_oauth('url', 'create_file_url', transform=transform_url_without_encode, client_factory=cf_share_client) g.storage_custom_command('generate-sas', 'generate_sas_file', client_factory=cf_share_client) g.storage_command_oauth('show', 'get_file_properties', transform=transform_file_show_result, table_transformer=transform_file_output, exception_handler=show_exception_handler) g.storage_custom_command_oauth('update', 'file_updates') g.storage_custom_command_oauth('exists', 'file_exists', transform=create_boolean_result_output_transformer('exists')) g.storage_command_oauth('metadata show', 'get_file_properties', exception_handler=show_exception_handler, transform=transform_metadata_show) g.storage_command_oauth('metadata update', 'set_file_metadata') g.storage_custom_command_oauth('copy start', 'storage_file_copy') g.storage_command_oauth('copy cancel', 'abort_copy') g.storage_custom_command('copy start-batch', 'storage_file_copy_batch', client_factory=cf_share_client) g.storage_custom_command_oauth('upload', 'storage_file_upload', exception_handler=file_related_exception_handler) g.storage_custom_command('upload-batch', 'storage_file_upload_batch', custom_command_type=get_custom_sdk('file', client_factory=cf_share_client)) g.storage_custom_command_oauth('download', 'download_file', exception_handler=file_related_exception_handler, transform=transform_file_show_result) g.storage_custom_command('download-batch', 'storage_file_download_batch', client_factory=cf_share_client) with self.command_group('storage cors', get_custom_sdk('cors', multi_service_properties_factory)) as g: from ._transformers import transform_cors_list_output g.storage_command('add', 'add_cors') g.storage_command('clear', 'clear_cors') g.storage_command('list', 'list_cors', transform=transform_cors_list_output) queue_client_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.queue._queue_client#QueueClient.{}', client_factory=cf_queue_client, resource_type=ResourceType.DATA_STORAGE_QUEUE) with self.command_group('storage queue', command_type=queue_client_sdk, is_preview=True, custom_command_type=get_custom_sdk('queue', cf_queue_client, ResourceType.DATA_STORAGE_QUEUE), resource_type=ResourceType.DATA_STORAGE_QUEUE, min_api='2018-03-28') as g: from ._format import transform_boolean_for_table from ._transformers import create_boolean_result_output_transformer g.storage_custom_command_oauth('create', 'create_queue', transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('delete', 'delete_queue', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_custom_command( 'generate-sas', 'generate_queue_sas') g.storage_custom_command_oauth('exists', 'queue_exists', transform=create_boolean_result_output_transformer('exists')) g.storage_command_oauth( 'metadata show', 'get_queue_properties', exception_handler=show_exception_handler, transform=lambda x: getattr(x, 'metadata', x)) g.storage_command_oauth('metadata update', 'set_queue_metadata', transform=create_boolean_result_output_transformer('updated')) with self.command_group('storage queue policy', command_type=queue_client_sdk, is_preview=True, custom_command_type=get_custom_sdk('access_policy', cf_queue_client, ResourceType.DATA_STORAGE_QUEUE), resource_type=ResourceType.DATA_STORAGE_QUEUE, 
min_api='2018-03-28') as g: from ._transformers import (transform_acl_list_output, transform_queue_policy_output, transform_queue_policy_json_output) g.storage_custom_command_oauth('create', 'create_acl_policy') g.storage_custom_command_oauth('delete', 'delete_acl_policy') g.storage_custom_command_oauth('show', 'get_acl_policy', transform=transform_queue_policy_output, exception_handler=show_exception_handler) g.storage_custom_command_oauth('list', 'list_acl_policies', transform=transform_queue_policy_json_output, table_transformer=transform_acl_list_output) g.storage_custom_command_oauth('update', 'set_acl_policy') with self.command_group('storage message', command_type=queue_client_sdk, is_preview=True, custom_command_type=get_custom_sdk('queue', cf_queue_client, ResourceType.DATA_STORAGE_QUEUE), resource_type=ResourceType.DATA_STORAGE_QUEUE, min_api='2018-03-28') as g: from ._transformers import (transform_message_list_output, transform_message_output) from ._format import transform_message_show g.storage_command_oauth('put', 'send_message', transform=transform_message_output) g.storage_custom_command_oauth('get', 'receive_messages', transform=transform_message_list_output, table_transformer=transform_message_show) g.storage_command_oauth('peek', 'peek_messages', transform=transform_message_list_output, table_transformer=transform_message_show) g.storage_command_oauth('delete', 'delete_message', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_command_oauth('clear', 'clear_messages') g.storage_command_oauth('update', 'update_message', transform=transform_message_output) queue_service_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.queue._queue_service_client#QueueServiceClient.{}', client_factory=cf_queue_service, resource_type=ResourceType.DATA_STORAGE_QUEUE) with self.command_group('storage queue', command_type=queue_service_sdk, is_preview=True, custom_command_type=get_custom_sdk('queue', cf_queue_service, ResourceType.DATA_STORAGE_QUEUE), resource_type=ResourceType.DATA_STORAGE_QUEUE, min_api='2018-03-28') as g: from ._transformers import transform_queue_stats_output g.storage_command_oauth('stats', 'get_service_stats', transform=transform_queue_stats_output) g.storage_custom_command_oauth('list', 'list_queues', transform=transform_storage_list_output) table_service_sdk = CliCommandType(operations_tmpl='azure.data.tables._table_service_client#TableServiceClient.{}', client_factory=cf_table_service, resource_type=ResourceType.DATA_STORAGE_TABLE) table_client_sdk = CliCommandType(operations_tmpl='azure.data.tables._table_client#TableClient.{}', client_factory=cf_table_client, resource_type=ResourceType.DATA_STORAGE_TABLE) with self.command_group('storage table', table_service_sdk, resource_type=ResourceType.DATA_STORAGE_TABLE, custom_command_type=get_custom_sdk('table', cf_table_service)) as g: from ._transformers import transform_table_stats_output g.storage_custom_command_oauth('create', 'create_table', transform=create_boolean_result_output_transformer('created'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('delete', 'delete_table', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_custom_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists')) g.storage_custom_command('generate-sas', 'generate_sas') g.storage_custom_command_oauth('list', 'list_tables') 
g.storage_command_oauth('stats', 'get_service_stats', transform=transform_table_stats_output) with self.command_group('storage table policy', table_client_sdk, resource_type=ResourceType.DATA_STORAGE_TABLE, custom_command_type=get_custom_sdk('access_policy', cf_table_client)) as g: g.storage_custom_command('create', 'create_acl_policy') g.storage_custom_command('delete', 'delete_acl_policy') g.storage_custom_command('show', 'get_acl_policy', exception_handler=show_exception_handler) g.storage_custom_command('list', 'list_acl_policies', table_transformer=transform_acl_list_output) g.storage_custom_command('update', 'set_acl_policy') with self.command_group('storage entity', table_client_sdk, resource_type=ResourceType.DATA_STORAGE_TABLE, custom_command_type=get_custom_sdk('table', cf_table_client)) as g: from ._format import transform_entity_show from ._transformers import (transform_entity_query_output, transform_entities_result, transform_entity_result) g.storage_custom_command_oauth('insert', 'insert_entity') g.storage_custom_command_oauth('replace', 'replace_entity') g.storage_custom_command_oauth('merge', 'merge_entity') g.storage_custom_command_oauth('delete', 'delete_entity', transform=create_boolean_result_output_transformer('deleted'), table_transformer=transform_boolean_for_table) g.storage_command_oauth('show', 'get_entity', table_transformer=transform_entity_show, transform=transform_entity_result, exception_handler=show_exception_handler) g.storage_custom_command_oauth('query', 'query_entity', table_transformer=transform_entity_query_output, transform=transform_entities_result) adls_service_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.filedatalake._data_lake_service_client#DataLakeServiceClient.{}', client_factory=cf_adls_service, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE ) adls_fs_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.filedatalake._file_system_client#FileSystemClient.{}', client_factory=cf_adls_file_system, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE ) adls_directory_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.filedatalake._data_lake_directory_client#DataLakeDirectoryClient.{}', client_factory=cf_adls_directory, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE ) custom_adls_directory_sdk = get_custom_sdk(custom_module='fs_directory', client_factory=cf_adls_directory, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE) adls_file_sdk = CliCommandType( operations_tmpl='azure.multiapi.storagev2.filedatalake._data_lake_file_client#DataLakeFileClient.{}', client_factory=cf_adls_file, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE ) with self.command_group('storage fs', adls_fs_sdk, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE, custom_command_type=get_custom_sdk('filesystem', cf_adls_file_system), min_api='2018-11-09') as g: from ._transformers import transform_fs_list_public_access_output, transform_fs_public_access_output, \ transform_metadata g.storage_custom_command_oauth('create', 'create_file_system') g.storage_command_oauth('list', 'list_file_systems', command_type=adls_service_sdk, transform=transform_fs_list_public_access_output) g.storage_command_oauth('show', 'get_file_system_properties', exception_handler=show_exception_handler, transform=transform_fs_public_access_output) g.storage_command_oauth('delete', 'delete_file_system', confirmation=True) g.storage_custom_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists')) 
g.storage_command_oauth('metadata update', 'set_file_system_metadata') g.storage_command_oauth('metadata show', 'get_file_system_properties', exception_handler=show_exception_handler, transform=transform_metadata) g.storage_custom_command_oauth('generate-sas', 'generate_sas_fs_uri', is_preview=True, custom_command_type=get_custom_sdk('filesystem', client_factory=cf_adls_service)) g.storage_custom_command_oauth('list-deleted-path', 'list_deleted_path', min_api='2020-06-12') g.storage_command_oauth('undelete-path', '_undelete_path', min_api='2020-06-12') with self.command_group('storage fs service-properties', command_type=adls_service_sdk, custom_command_type=get_custom_sdk('filesystem', cf_adls_service), resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE, min_api='2020-06-12') as g: g.storage_command_oauth('show', 'get_service_properties', exception_handler=show_exception_handler) g.storage_custom_command_oauth('update', 'set_service_properties') with self.command_group('storage fs directory', adls_directory_sdk, custom_command_type=get_custom_sdk('fs_directory', cf_adls_directory), resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE, min_api='2018-11-09') as g: from ._transformers import transform_storage_list_output, transform_metadata g.storage_command_oauth('create', 'create_directory') g.storage_custom_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists')) g.storage_custom_command_oauth('show', 'get_directory_properties', exception_handler=show_exception_handler) g.storage_command_oauth('delete', 'delete_directory', confirmation=True) g.storage_command_oauth('move', 'rename_directory') g.storage_custom_command_oauth('list', 'list_fs_directories', client_factory=cf_adls_file_system, transform=transform_storage_list_output) g.storage_command_oauth('metadata update', 'set_metadata') g.storage_command_oauth('metadata show', 'get_directory_properties', exception_handler=show_exception_handler, transform=transform_metadata) g.storage_custom_command_oauth('generate-sas', 'generate_sas_directory_uri', custom_command_type=get_custom_sdk('fs_directory', client_factory=cf_adls_service), min_api='2020-02-10') with self.command_group('storage fs directory', custom_command_type=get_custom_sdk('azcopy', None))as g: g.storage_custom_command_oauth('upload', 'storage_fs_directory_copy', is_preview=True) g.storage_custom_command_oauth('download', 'storage_fs_directory_copy', is_preview=True) with self.command_group('storage fs file', adls_file_sdk, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE, custom_command_type=get_custom_sdk('fs_file', cf_adls_file), min_api='2018-11-09') as g: from ._transformers import transform_storage_list_output, create_boolean_result_output_transformer g.storage_command_oauth('create', 'create_file') g.storage_custom_command_oauth('upload', 'upload_file') g.storage_custom_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists')) g.storage_custom_command_oauth('append', 'append_file') g.storage_custom_command_oauth('download', 'download_file') g.storage_custom_command_oauth('show', 'get_file_properties', exception_handler=show_exception_handler) g.storage_custom_command_oauth('list', 'list_fs_files', custom_command_type=get_custom_sdk('fs_file', cf_adls_file_system), transform=transform_storage_list_output) g.storage_command_oauth('move', 'rename_file') g.storage_command_oauth('delete', 'delete_file', confirmation=True) g.storage_command_oauth('metadata update', 'set_metadata') 
g.storage_command_oauth('metadata show', 'get_file_properties', exception_handler=show_exception_handler, transform=transform_metadata) g.storage_command_oauth('set-expiry', "set_file_expiry") with self.command_group('storage fs access', adls_directory_sdk, custom_command_type=custom_adls_directory_sdk, resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE, min_api='2018-11-09') as g: from ._transformers import transform_fs_access_output g.storage_command_oauth('set', 'set_access_control') g.storage_command_oauth('show', 'get_access_control', transform=transform_fs_access_output) g.storage_custom_command_oauth('set-recursive', 'set_access_control_recursive', min_api='2020-02-10') g.storage_custom_command_oauth('update-recursive', 'update_access_control_recursive', min_api='2020-02-10') g.storage_custom_command_oauth('remove-recursive', 'remove_access_control_recursive', min_api='2020-02-10') with self.command_group('storage account migration'): from .operations.account import AccountMigrationStart self.command_table['storage account migration start'] = AccountMigrationStart(loader=self) with self.command_group('storage account'): from .operations.account import FileServiceUsage self.command_table['storage account file-service-usage'] = FileServiceUsage(loader=self)
Returns a CliCommandType instance with the specified operation template based on the given custom module name. This is useful when the command is not defined in the default 'custom' module but instead in a module under the 'operations' package.
load_command_table
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/commands.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/commands.py
MIT
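The docstring above describes the get_custom_sdk helper nested inside load_command_table, whose body falls outside this excerpt. Judging from how the record calls it (e.g. get_custom_sdk('account', client_factory=cf_mgmt_blob_services, ...)) and from the 'azure.cli.command_modules.storage.operations.account#{}' templates it produces, a plausible sketch could look like this; the exact implementation in the repository may differ:

from azure.cli.core.commands import CliCommandType

def get_custom_sdk(custom_module, client_factory, resource_type=None):
    """Sketch: build a CliCommandType whose operations template points at a
    module under the 'operations' package instead of the default 'custom' module."""
    return CliCommandType(
        operations_tmpl='azure.cli.command_modules.storage.operations.{}#{{}}'.format(custom_module),
        client_factory=client_factory,
        resource_type=resource_type
    )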
def transform_acl_list_output(result):
    """ Transform to convert SDK output into a form that is more readily
    usable by the CLI and tools such as jpterm. """
    from collections import OrderedDict
    new_result = []
    for key in sorted(result.keys()):
        new_entry = OrderedDict()
        new_entry['Name'] = key
        new_entry['Start'] = result[key]['start']
        new_entry['Expiry'] = result[key]['expiry']
        new_entry['Permissions'] = result[key]['permission']
        new_result.append(new_entry)
    return new_result
Transform to convert SDK output into a form that is more readily usable by the CLI and tools such as jpterm.
transform_acl_list_output
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
MIT
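A quick sanity check of the transform above, runnable once the function is in scope (the policy names and values are made up):

sample = {
    'readpolicy': {'start': '2024-01-01', 'expiry': '2024-02-01', 'permission': 'r'},
    'writepolicy': {'start': '2024-01-01', 'expiry': '2024-02-01', 'permission': 'w'},
}
rows = transform_acl_list_output(sample)
# Keys come out in sorted order, one OrderedDict per policy:
# [{'Name': 'readpolicy', 'Start': ..., 'Expiry': ..., 'Permissions': 'r'}, ...]
print(rows[0]['Name'])  # readpolicy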
def transform_url(result):
    """ Ensures the resulting URL string does not contain extra / characters """
    import re
    result = re.sub('//', '/', result)
    result = re.sub('/', '//', result, count=1)
    return encode_url_path(result)
Ensures the resulting URL string does not contain extra / characters
transform_url
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
MIT
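For illustration, the two substitutions first collapse every '//' (including the one after the scheme) and then restore only the first. encode_url_path lives in the module's url_quote_util helpers, so this standalone sketch stubs it out with an identity function:

import re

def encode_url_path(result):  # identity stand-in for the real percent-encoding helper
    return result

url = 'https://myaccount.blob.core.windows.net//container//dir//blob'
step1 = re.sub('//', '/', url)             # collapses every '//', including the scheme's
step2 = re.sub('/', '//', step1, count=1)  # restores only the first one, after 'https:'
print(step2)  # https://myaccount.blob.core.windows.net/container/dir/blob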
def transform_fs_access_output(result):
    """ Transform to convert SDK output into a form that is more readily
    usable by the CLI and tools such as jpterm. """
    new_result = {}
    useful_keys = ['acl', 'group', 'owner', 'permissions']
    for key in useful_keys:
        new_result[key] = result[key]
    return new_result
Transform to convert SDK output into a form that is more readily usable by the CLI and tools such as jpterm.
transform_fs_access_output
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
MIT
def transform_fs_public_access_output(result):
    """ Transform to convert SDK output into a form that is more readily
    usable by the CLI and tools such as jpterm. """
    if result.public_access == 'blob':
        result.public_access = 'file'
    if result.public_access == 'container':
        result.public_access = 'filesystem'
    return result
Transform to convert SDK output into a form that is more readily usable by the CLI and tools such as jpterm.
transform_fs_public_access_output
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
MIT
def transform_fs_list_public_access_output(result):
    """ Transform to convert SDK output into a form that is more readily
    usable by the CLI and tools such as jpterm. """
    new_result = list(result)
    for i, item in enumerate(new_result):
        new_result[i] = transform_fs_public_access_output(item)
    return new_result
Transform to convert SDK output into a form that is more readily usable by the CLI and tools such as jpterm.
transform_fs_list_public_access_output
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_transformers_azure_stack.py
MIT
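As a worked example, the two transforms above rename blob-service access levels into filesystem terms. A simple namespace object is enough to exercise them, runnable with both functions in scope (the inputs are made up):

from types import SimpleNamespace

items = [SimpleNamespace(public_access='blob'),
         SimpleNamespace(public_access='container'),
         SimpleNamespace(public_access=None)]
out = transform_fs_list_public_access_output(items)
print([i.public_access for i in out])  # ['file', 'filesystem', None]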
def make_encoded_file_url_and_params(file_service, share, file_dir, file_name, sas_token, safe=SAFE_CHARS):
    """
    Makes the file url using the service. Converts the file directory and name into byte-strings
    if needed and returns (url, dir, file) as a tuple. This is needed to account for string
    encoding differences between python 2 and 3.
    """
    try:
        file_url = file_service.make_file_url(share, file_dir, file_name, sas_token=sas_token)
    except UnicodeEncodeError:
        file_dir = file_dir.encode('utf-8')
        file_name = file_name.encode('utf-8')
        file_url = file_service.make_file_url(share, file_dir, file_name, sas_token=sas_token)

    if not file_dir:
        sep = file_url.find('://')
        file_url = file_url[:sep + 3] + file_url[sep + 3:].replace('//', '/')

    return encode_url_path(file_url, safe), file_dir, file_name
Makes the file url using the service. Converts the file directory and name into byte-strings if needed and returns (url, dir, file) as a tuple. This is needed to account for string encoding differences between python 2 and 3.
make_encoded_file_url_and_params
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/url_quote_util.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/url_quote_util.py
MIT
def _dont_fail_on_exist(ex, error_code):
    """
    Don't throw an exception if the resource already exists.
    This is called by create_* APIs with fail_on_exist=False.
    :param ex: the exception raised by the service
    :param error_code: the error code that indicates the resource already exists
    :return: False when the exception matches error_code
    """
    if ex.error_code == error_code:
        return False
    raise ex
Don't throw an exception if the resource already exists. This is called by create_* APIs with fail_on_exist=False. :param ex: the exception raised by the service :param error_code: the error code that indicates the resource already exists :return: False when the exception matches error_code
_dont_fail_on_exist
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/track2_util.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/track2_util.py
MIT
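A hedged sketch of how a create_* helper might use this pattern; the client, exception class, and error-code string below are illustrative stand-ins, not the real SDK surface:

class FakeStorageError(Exception):  # hypothetical stand-in for the SDK's exception type
    def __init__(self, error_code):
        super().__init__(error_code)
        self.error_code = error_code

def create_container_if_absent(client, name, fail_on_exist=False):
    try:
        client.create_container(name)
        return True
    except FakeStorageError as ex:
        if not fail_on_exist:
            # returns False when the container already exists, re-raises anything else
            return _dont_fail_on_exist(ex, 'ContainerAlreadyExists')
        raise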
def _query_account_key(cli_ctx, account_name):
    """Query the storage account key. This is used when the customer doesn't offer account key but name."""
    rg, scf = _query_account_rg(cli_ctx, account_name)
    t_storage_account_keys = get_sdk(
        cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')

    logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
    if t_storage_account_keys:
        return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).key1
    # of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
    return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).keys[0].value  # pylint: disable=no-member
Query the storage account key. This is used when the customer doesn't offer account key but name.
_query_account_key
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
def _query_account_rg(cli_ctx, account_name):
    """Query the storage account's resource group, which the mgmt sdk requires."""
    scf = storage_client_factory(cli_ctx)
    acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)
    if acc:
        from azure.mgmt.core.tools import parse_resource_id
        return parse_resource_id(acc.id)['resource_group'], scf
    raise ValueError("Storage account '{}' not found.".format(account_name))
Query the storage account's resource group, which the mgmt sdk requires.
_query_account_rg
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
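For reference, parse_resource_id splits a full ARM id into its components, which is what makes the resource-group lookup above work. A minimal illustration with a dummy subscription id:

from azure.mgmt.core.tools import parse_resource_id

rid = ('/subscriptions/00000000-0000-0000-0000-000000000000'
       '/resourceGroups/myrg/providers/Microsoft.Storage/storageAccounts/myaccount')
parts = parse_resource_id(rid)
print(parts['resource_group'], parts['name'])  # myrg myaccount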
def parse_storage_account(cmd, namespace):
    """Parse storage account which can be either account name or account id"""
    from azure.mgmt.core.tools import parse_resource_id, is_valid_resource_id

    if namespace.account_name and is_valid_resource_id(namespace.account_name):
        namespace.resource_group_name = parse_resource_id(namespace.account_name)['resource_group']
        namespace.account_name = parse_resource_id(namespace.account_name)['name']
    elif namespace.account_name and not is_valid_resource_id(namespace.account_name) and \
            not namespace.resource_group_name:
        namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
Parse storage account which can be either account name or account id
parse_storage_account
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
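The validator's resource-id branch can be exercised with a bare namespace; a runnable sketch (cmd is only consulted in the name-only branch, so passing None is safe here, and the resource id is a dummy):

import argparse

ns = argparse.Namespace(
    account_name=('/subscriptions/00000000-0000-0000-0000-000000000000'
                  '/resourceGroups/myrg/providers/Microsoft.Storage/storageAccounts/myaccount'),
    resource_group_name=None)
parse_storage_account(None, ns)  # the id branch never touches cmd
print(ns.resource_group_name, ns.account_name)  # myrg myaccount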
def parse_storage_account_aaz(cmd, args):
    """Parse storage account which can be either account name or account id for aaz"""
    from azure.mgmt.core.tools import parse_resource_id, is_valid_resource_id
    account_name = str(args.storage_account)
    if account_name and is_valid_resource_id(account_name):
        args.resource_group = parse_resource_id(account_name)['resource_group']
        args.storage_account = parse_resource_id(account_name)['name']
    elif account_name and not is_valid_resource_id(account_name) and \
            not args.resource_group:
        args.resource_group = _query_account_rg(cmd.cli_ctx, account_name)[0]
Parse storage account which can be either account name or account id for aaz
parse_storage_account_aaz
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
def parse_account_name_aaz(cmd, args):
    """Parse storage account which can be either account name or account id for aaz"""
    from azure.mgmt.core.tools import parse_resource_id, is_valid_resource_id
    account_name = str(args.account_name)
    if account_name and is_valid_resource_id(account_name):
        args.resource_group = parse_resource_id(account_name)['resource_group']
        args.account_name = parse_resource_id(account_name)['name']
    elif account_name and not is_valid_resource_id(account_name) and \
            not args.resource_group:
        args.resource_group = _query_account_rg(cmd.cli_ctx, account_name)[0]
Parse storage account which can be either account name or account id for aaz
parse_account_name_aaz
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
def process_resource_group(cmd, namespace):
    """Processes the resource group parameter from the account name"""
    if namespace.account_name and not namespace.resource_group_name:
        namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
Processes the resource group parameter from the account name
process_resource_group
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
def validate_client_parameters(cmd, namespace):
    """ Retrieves storage connection parameters from environment variables and parses out connection string into
    account name and key """
    n = namespace

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value(cmd, 'storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            if hasattr(n, 'account_url') and not n.account_url:
                n.account_name = get_config_value(cmd, 'storage', 'account', None)
                n.account_url = get_config_value(cmd, 'storage', 'account_url', None)
            else:
                n.account_name = get_config_value(cmd, 'storage', 'account', None)
        if auth_mode == 'login':
            prefix = cmd.command_kwargs['resource_type'].value[0]
            # is_storagev2() is used to distinguish if the command is in track2 SDK
            # If yes, we will use get_login_credentials() as token credential
            if is_storagev2(prefix):
                from azure.cli.core._profile import Profile
                profile = Profile(cli_ctx=cmd.cli_ctx)
                n.token_credential, _, _ = profile.get_login_credentials(subscription_id=n._subscription)
            # Otherwise, we will assume it is in track1 and keep previous token updater
            else:
                n.token_credential = _create_token_credential(cmd.cli_ctx)

    if hasattr(n, 'token_credential') and n.token_credential:
        # give warning if there are account key args being ignored
        account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
                            n.connection_string and "--connection-string"]
        account_key_args = [arg for arg in account_key_args if arg]

        if account_key_args:
            logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                           ' ,'.join(account_key_args))
        return

    # When there is no input for credential, we will read environment variable
    if not n.connection_string and not n.account_key and not n.sas_token:
        n.connection_string = get_config_value(cmd, 'storage', 'connection_string', None)

    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        n.sas_token = conn_dict.get('SharedAccessSignature')

    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        if hasattr(n, 'account_url') and not n.account_url:
            n.account_name = get_config_value(cmd, 'storage', 'account', None)
            n.account_url = get_config_value(cmd, 'storage', 'account_url', None)
        else:
            n.account_name = get_config_value(cmd, 'storage', 'account', None)
    if not n.account_key and not n.sas_token:
        n.account_key = get_config_value(cmd, 'storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value(cmd, 'storage', 'sas_token', None)

    # strip the '?' from sas token. the portal and command line return the sas token in different forms
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # account name with secondary
    if n.account_name and n.account_name.endswith('-secondary'):
        n.location_mode = 'secondary'
        n.account_name = n.account_name[:-10]

    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        message = """
There are no credentials provided in your command and environment, we will query for account key for your storage account.
It is recommended to provide --connection-string, --account-key or --sas-token in your command as credentials.
"""
        if 'auth_mode' in cmd.arguments:
            message += """
You also can add `--auth-mode login` in your command to use Azure Active Directory (Azure AD) for authorization if your login account is assigned required RBAC roles.
For more information about RBAC roles in storage, visit https://learn.microsoft.com/azure/storage/common/storage-auth-aad-rbac-cli.
"""
        logger.warning('%s\nIn addition, setting the corresponding environment variables can avoid inputting '
                       'credentials in your command. Please use --help to get more information about environment '
                       'variable usage.', message)
        try:
            n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("\nSkip querying account key due to failure: %s", ex)

    if hasattr(n, 'account_url') and n.account_url and not n.account_key and not n.sas_token:
        message = """
There are no credentials provided in your command and environment.
Please provide --connection-string, --account-key or --sas-token in your command as credentials.
"""
        if 'auth_mode' in cmd.arguments:
            message += """
You also can add `--auth-mode login` in your command to use Azure Active Directory (Azure AD) for authorization if your login account is assigned required RBAC roles.
For more information about RBAC roles in storage, visit https://learn.microsoft.com/azure/storage/common/storage-auth-aad-rbac-cli.
"""
        from azure.cli.core.azclierror import InvalidArgumentValueError
        raise InvalidArgumentValueError(message)
Retrieves storage connection parameters from environment variables and parses out connection string into account name and key
validate_client_parameters
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
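The connection-string branch relies on validate_key_value_pairs, which the CLI imports from its core validators. Re-implemented here only for a standalone illustration (the account name and key are dummies), it splits 'k=v' pairs on ';':

def validate_key_value_pairs(string):
    # mirrors the core helper used above: 'a=1;b=2' -> {'a': '1', 'b': '2'}
    return dict(pair.split('=', 1) for pair in string.split(';') if pair)

conn = 'DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=ZmFrZWtleQ=='
d = validate_key_value_pairs(conn)
print(d['AccountName'])  # myaccount (split on the first '=' keeps the base64 padding intact)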
def process_blob_source_uri(cmd, namespace):
    """
    Validate the parameters referenced to a blob source and create the source URI from them.
    """
    from .util import create_short_lived_blob_sas, create_short_lived_blob_sas_v2
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '

    ns = vars(namespace)

    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)

    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    sas = ns.pop('source_sas', None)

    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        # simplest scenario--no further processing necessary
        return

    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account

    # determine if the copy will happen in the same storage account
    if not source_account_name and source_account_key:
        raise ValueError(usage_string.format('Source account key is given but account name is not'))
    if not source_account_name and not source_account_key:
        # neither source account name or key is given, assume that user intends to copy blob in
        # the same account
        source_account_name = ns.get('account_name', None)
        source_account_key = ns.get('account_key', None)
    elif source_account_name and not source_account_key:
        if source_account_name == ns.get('account_name', None):
            # the source account name is same as the destination account name
            source_account_key = ns.get('account_key', None)
        else:
            # the source account is different from destination account but the key is missing
            # try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # else: both source account name and key are given by user

    if not source_account_name:
        raise ValueError(usage_string.format('Storage account name not found'))

    if not sas:
        prefix = cmd.command_kwargs['resource_type'].value[0]
        if is_storagev2(prefix):
            sas = create_short_lived_blob_sas_v2(cmd, source_account_name, container, blob,
                                                 account_key=source_account_key)
        else:
            sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)

    query_params = []
    if sas:
        query_params.append(sas)
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))

    uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
                                                cmd.cli_ctx.cloud.suffixes.storage_endpoint,
                                                container,
                                                blob,
                                                '?' if query_params else '',
                                                '&'.join(query_params))

    namespace.copy_source = uri
Validate the parameters referenced to a blob source and create the source URI from them.
process_blob_source_uri
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
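The final URI assembly at the end of the validator reduces to simple string formatting; a standalone illustration with dummy values (the SAS and snapshot strings are made up):

source_account_name = 'srcaccount'
storage_endpoint = 'core.windows.net'  # what cmd.cli_ctx.cloud.suffixes.storage_endpoint supplies
container, blob = 'src-container', 'src-blob'
query_params = ['sv=2022-11-02&sig=FAKE', 'snapshot=2024-01-01T00:00:00Z']

uri = 'https://{}.blob.{}/{}/{}{}{}'.format(
    source_account_name, storage_endpoint, container, blob,
    '?' if query_params else '', '&'.join(query_params))
print(uri)
# https://srcaccount.blob.core.windows.net/src-container/src-blob?sv=2022-11-02&sig=FAKE&snapshot=2024-01-01T00:00:00Z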
def validate_encryption_services(cmd, namespace):
    """
    Builds up the encryption services object for storage account operations based on the list of services passed in.
    """
    if namespace.encryption_services:
        t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
                                                              'EncryptionServices', 'EncryptionService',
                                                              mod='models')
        services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}

        namespace.encryption_services = t_encryption_services(**services)
Builds up the encryption services object for storage account operations based on the list of services passed in.
validate_encryption_services
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
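A minimal sketch of what the dict-comprehension expansion does. The two classes are hypothetical
stand-ins for the SDK models that get_sdk returns; only the keyword-argument pattern is the point.

class EncryptionService:  # stand-in for the SDK model
    def __init__(self, enabled=None):
        self.enabled = enabled

class EncryptionServices:  # stand-in for the SDK model
    def __init__(self, blob=None, file=None, table=None, queue=None):
        self.blob, self.file, self.table, self.queue = blob, file, table, queue

requested = ['blob', 'file']
services = {service: EncryptionService(enabled=True) for service in requested}
result = EncryptionServices(**services)
print(result.blob.enabled, result.file.enabled, result.table)  # True True None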
def cast_val(key, val):
    """ Attempts to cast numeric values (except RowKey, PartitionKey, and DisplayVersion)
    to numbers so they can be queried correctly. """
    if key in ['PartitionKey', 'RowKey', 'DisplayVersion']:
        return val

    def try_cast(to_type):
        try:
            return to_type(val)
        except ValueError:
            return None
    return try_cast(int) or try_cast(float) or val
Attempts to cast numeric values (except RowKey, PartitionKey, and DisplayVersion) to numbers so they can be queried correctly.
validate_entity.cast_val
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
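A few illustrative calls (assuming cast_val is in scope) showing which values are cast and which
pass through. Note that a falsy cast result such as 0 falls through the or-chain, so '0' comes
back as the original string.

print(cast_val('Count', '42'))    # 42 (int)
print(cast_val('Price', '9.99'))  # 9.99 (int fails, float succeeds)
print(cast_val('RowKey', '001'))  # '001' (exempt key, left as a string)
print(cast_val('Name', 'alpha'))  # 'alpha' (not numeric, unchanged)
print(cast_val('Count', '0'))     # '0' (0 and 0.0 are falsy, so the or-chain returns the string)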
def validate_entity(namespace):
    """ Converts a list of key=value pairs into a dictionary. Ensures that the required
    RowKey and PartitionKey are converted to the correct case and included. """
    values = dict(x.split('=', 1) for x in namespace.entity)
    edm_types = {}
    keys = values.keys()
    for key in list(keys):
        if key.lower() == 'rowkey':
            val = values[key]
            del values[key]
            values['RowKey'] = val
        elif key.lower() == 'partitionkey':
            val = values[key]
            del values[key]
            values['PartitionKey'] = val
        elif key.endswith('@odata.type'):
            val = values[key]
            del values[key]
            real_key = key[0: key.index('@odata.type')]
            edm_types[real_key] = val

    keys = values.keys()

    missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
    missing_keys = '{}PartitionKey'.format(missing_keys) \
        if 'PartitionKey' not in keys else missing_keys
    if missing_keys:
        raise argparse.ArgumentError(
            None, 'incorrect usage: entity requires: {}'.format(missing_keys))

    def cast_val(key, val):
        """ Attempts to cast numeric values (except RowKey, PartitionKey, and DisplayVersion)
        to numbers so they can be queried correctly. """
        if key in ['PartitionKey', 'RowKey', 'DisplayVersion']:
            return val

        def try_cast(to_type):
            try:
                return to_type(val)
            except ValueError:
                return None
        return try_cast(int) or try_cast(float) or val

    for key, val in values.items():
        if edm_types.get(key, None):
            values[key] = (val, edm_types[key])
        else:
            # ensure numbers are converted from strings so querying will work correctly
            values[key] = cast_val(key, val)
    namespace.entity = values
Converts a list of key=value pairs into a dictionary. Ensures that the required RowKey and PartitionKey are converted to the correct case and included.
validate_entity
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
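A quick usage sketch (assuming validate_entity is importable) showing key-case normalization, the
(value, edm_type) tuple form produced by an @odata.type annotation, and numeric casting:

import argparse

ns = argparse.Namespace(entity=['rowkey=001', 'partitionkey=north',
                                'Count=5', 'Level=9', 'Level@odata.type=Edm.Int64'])
validate_entity(ns)
print(ns.entity)
# {'Count': 5, 'Level': ('9', 'Edm.Int64'), 'RowKey': '001', 'PartitionKey': 'north'}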
def validate_marker(namespace):
    """ Converts a list of key=value pairs into a dictionary. Ensures that the required
    nextrowkey and nextpartitionkey keys are included. """
    if not namespace.marker:
        return
    marker = dict(x.split('=', 1) for x in namespace.marker)
    expected_keys = {'nextrowkey', 'nextpartitionkey'}

    for key in list(marker.keys()):
        new_key = key.lower()
        if new_key in expected_keys:
            expected_keys.remove(new_key)
            # normalize the key to lowercase
            val = marker[key]
            del marker[key]
            marker[new_key] = val
    if expected_keys:
        raise argparse.ArgumentError(
            None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))

    namespace.marker = marker
Converts a list of key=value pairs into a dictionary. Ensures that the required nextrowkey and nextpartitionkey keys are included.
validate_marker
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
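For example (assuming validate_marker is in scope), mixed-case keys are normalized to lowercase,
and a missing required key raises an ArgumentError:

import argparse

ns = argparse.Namespace(marker=['NextRowKey=r1', 'NextPartitionKey=p1'])
validate_marker(ns)
print(ns.marker)  # {'nextrowkey': 'r1', 'nextpartitionkey': 'p1'}

ns = argparse.Namespace(marker=['NextRowKey=r1'])
validate_marker(ns)  # raises: incorrect usage: marker requires: nextpartitionkey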
def get_file_path_validator(default_file_param=None):
    """ Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
    Optionally, another path-type parameter can be named to supply a default filename. """
    def validator(namespace):
        if not hasattr(namespace, 'path'):
            return
        path = namespace.path
        dir_name, file_name = os.path.split(path) if path else (None, '')

        if dir_name and dir_name.startswith('./'):
            dir_name = dir_name.replace('./', '', 1)

        if default_file_param and '.' not in file_name:
            # the supplied path looks like a directory; take the filename from the default parameter
            dir_name = path
            file_name = os.path.split(getattr(namespace, default_file_param))[1]

        # normalize empty or current-directory values to None
        dir_name = None if dir_name in ('', '.') else dir_name
        namespace.directory_name = dir_name
        namespace.file_name = file_name
        del namespace.path
    return validator
Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'. Optionally, another path-type parameter can be named to supply a default filename.
get_file_path_validator
python
Azure/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_validators.py
https://github.com/Azure/azure-cli/blob/master/src/azure-cli/azure/cli/command_modules/storage/_validators.py
MIT
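A brief sketch of the returned validator in action (assuming get_file_path_validator and os are
in scope):

import argparse

validator = get_file_path_validator()

ns = argparse.Namespace(path='dir/subdir/file.txt')
validator(ns)
print(ns.directory_name, ns.file_name)  # dir/subdir file.txt

ns = argparse.Namespace(path='file.txt')
validator(ns)
print(ns.directory_name, ns.file_name)  # None file.txt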