GCC,nvptx,0,"void tool_cleanup(void) { unlink(""nvptx_tempfile.o""); unlink(""nvptx_tempfile.s""); }" GCC,nvptx,1,"void maybe_unlink(const char *filename, int leave) { if (!leave) { if (unlink(filename) == -1) { perror(""deleting file""); exit(EXIT_FAILURE); } } printf(""[Leaving %s]\n"", filename); }" GCC,nvptx,2,"void parse_list_nosemi(const char* list) { std::vector values; std::vector tokens = split(list, "",""); for (const std::string& token : tokens) { if (token.find(""nvptx"") != std::string::npos) { std::string value = token.substr(token.find(""nvptx"") + 5); values.push_back(value); } } return values; } ``` This function takes a comma-separated list of tokens as input, splits the list into individual tokens using the `split` function, and then iterates over the tokens to extract the target-specific values. The target-specific values are added to a list and returned at the end of the function. You can use this function as follows: ``` int main() { const char* list = ""nvptx,1,1,0""; std::vector values = parse_list_nosemi(list); for (const std::string& value : values) { std::cout << value << std::endl; } return 0; }" GCC,nvptx,3,"void read_file(FILE *file) { char buffer[16384]; size_t bytes_read; while ((bytes_read = fread(buffer, 1, sizeof(buffer), file)) > 0) { } if (ferror(file)) { } }" GCC,nvptx,4,"void tool_cleanup(void) { unlink(""nvptx_tempfile.o""); unlink(""nvptx_tempfile.s""); }" GCC,nvptx,5,"void write_token(const char *token, int n) { char encoded_token[n]; int i; for (i = 0; i < n; i++) { encoded_token[i] = token[i] ^ 0x80; } fwrite(encoded_token, 1, n, stdout); }" GCC,nvptx,6,"int access_check(const char *name, int mode) { struct stat st; if (stat(name, &st) == 0) { if (S_ISDIR(st.st_mode)) { return 0; } if (mode == F_OK) { return 1; } if (mode == R_OK) { return st.st_mode & S_IRUSR; } if (mode == W_OK) { return st.st_mode & S_IWUSR; } if (mode == X_OK) { return st.st_mode & S_IXUSR; } } return 0; } int main(int argc, char *argv[]) { if (argc != 3) { fprintf(stderr, ""Usage: %s \n"", argv[0]); return 1; } const char *name = argv[1]; int mode = atoi(argv[2]); if (access_check(name, mode) == 0) { fprintf(stderr, ""Access denied for %s in mode %d\n"", name, mode); return 1; } fprintf(stderr, ""Access granted for %s in mode %d\n"", name, mode); return 0; }" GCC,nvptx,7,"void free_array_of_ptrs(void **ptr, size_t n) { if (ptr == NULL) return; for (size_t i = 0; i < n; i++) { if (ptr[i] != NULL) { free(ptr[i]); } } free(ptr); }" GCC,nvptx,8,"void maybe_unlink(const char *filename, int leave) { if (!leave) { if (unlink(filename) == -1) { perror(""deleting file""); exit(EXIT_FAILURE); } } printf(""[Leaving %s]\n"", filename); }" GCC,nvptx,9,"int parse_env_var(const char *str, char **pvalues) { int count = 0; char *token = strtok(str, "":""); while (token != NULL) { pvalues[count++] = token; token = strtok(NULL, "":""); } return count; } ``` This function takes two arguments: `str` is the input string to be parsed, and `pvalues` is an array of pointers to store the parsed tokens. The function uses the `strtok` function to split the input string into tokens, and then stores each token in the `pvalues` array. The function returns the number of tokens found. 
Here's an example of how to use this function: ```c int main() { char *str = ""nvptx,1,1,0,1,0,1""; char *pvalues[7]; int count = parse_env_var(str, pvalues); for (int i = 0; i < count; i++) { printf(""%s\n"", pvalues[i]); } return 0; }" GCC,nvptx,10,"void read_file(FILE *file) { char buffer[16384]; size_t bytes_read; while ((bytes_read = fread(buffer, 1, sizeof(buffer), file)) > 0) { } if (ferror(file)) { } }" GCC,nvptx,11,"void tool_cleanup(void) { unlink(""nvptx_tempfile.o""); unlink(""nvptx_tempfile.s""); }" GCC,nvptx,12,"void xputenv(const char *name, const char *value) { if (name == NULL || value == NULL) { return; } setenv(name, value, 1); if (getenv(""NVPTX_VERBOSE"") != NULL) { fprintf(stderr, ""NVPTX: %s=%s\n"", name, value); } }" GCC,nvptx,13," do { \ if (strcmp(target, ""nvptx"") == 0) { \ printf(""machine=nvptx\n""); \ printf(""cpu=nvptx\n""); \ printf(""__nvptx__\n""); \ printf(""__nvptx_softstack__\n""); \ printf(""__nvptx_unisimt__\n""); \ printf(""__PTX_SM__=%u\n"", 0); \ printf(""__PTX_ISA_VERSION_MAJOR__=%u\n"", 0); \ printf(""__PTX_ISA_VERSION_MINOR__=%u\n"", 0); \ } \ } while (0) int main(int argc, char **argv) { if (argc < 2) { printf(""Usage: %s \n"", argv[0]); return 1; } const char *target = argv[1]; NVPTX_CPU_CPP_BUILTINS(target); return 0; }" GCC,nvptx,14, GCC,nvptx,15, GCC,nvptx,16,void nvptx_goacc_reduction_fini(void) { int target_specific = 1; __builtin_goacc_set_target_specific(target_specific); __builtin_goacc_reduction_fini(); } GCC,nvptx,17,"void nvptx_init_builtins(void) { nvptx_builtin_function(""__builtin_nvptx_shuffle"", nvptx_shuffle); nvptx_builtin_function(""__builtin_nvptx_shufflell"", nvptx_shufflell); nvptx_builtin_function(""__builtin_nvptx_worker_addr"", nvptx_worker_addr); nvptx_builtin_function(""__builtin_nvptx_vector_addr"", nvptx_vector_addr); nvptx_builtin_function(""__builtin_nvptx_cmp_swap"", nvptx_cmp_swap); nvptx_builtin_function(""__builtin_nvptx_cmp_swapll"", nvptx_cmp_swapll); nvptx_builtin_function(""__builtin_nvptx_membar_gl"", nvptx_membar_gl); nvptx_builtin_function(""__builtin_nvptx_membar_cta"", nvptx_membar_cta); nvptx_builtin_function(""__builtin_nvptx_bar_red_and"", nvptx_bar_red_and); nvptx_builtin_function(""__builtin_nvptx_bar_red_or"", nvptx_bar_red_or); nvptx_builtin_function(""__builtin_nvptx_bar_red_popc"", nvptx_bar_red_popc); }" GCC,nvptx,18, GCC,nvptx,19,"void write_fn_proto(FILE *s, tree decl, const char *name) { if (decl->decl_kind == DECL_KERNEL) { fprintf(s, "".kernel %s\n"", name); } else if (decl->decl_kind == DECL_FUNCTION) { fprintf(s, "".func %s\n"", name); } fprintf(s, ""// %s\n"", name); }" GCC,nvptx,20, GCC,nvptx,21, GCC,nvptx,22,"begin_decl_field (tree field, tree value) { if (field->type->mode == TYPE_MODE_NVPTX) { fprintf (asm_out_file, "" = { ""); fprintf (asm_out_file, ""%s"", value->type->mode == TYPE_MODE_NVPTX ? "" = "" : """"); fprintf (asm_out_file, ""%s"", value->type->mode == TYPE_MODE_NVPTX ? "" = "" : """"); fprintf (asm_out_file, ""%s"", value->type->mode == TYPE_MODE_NVPTX ? 
"" = "" : """"); fprintf (asm_out_file, "" }""); } }" GCC,nvptx,23," bool is_public) { if (is_public) { fprintf(file, ""// BEGIN%s VAR DEF: %s GLOBAL\n"", name, type); } else { fprintf(file, ""// BEGIN%s VAR DEF: %s\n"", name, type); } fprintf(file, ""// CHUNK SIZE: 0\n""); }" GCC,nvptx,24,int maybe_split_mode(int *pmode) { if (*pmode == 1) { return 1; } else { *pmode = 2; return 2; } } GCC,nvptx,25,"nvptx_addr_space_from_address (rtx addr) { if (GET_CODE (addr) == SYMBOL_REF) { const char *name = XSTR (addr, 0); if (strncmp (name, ""nvptx,"", 6) == 0) { const char *addr_space = name + 6; if (strcmp (addr_space, ""0"") == 0) return NVPTX_ADDR_SPACE_GLOBAL; else if (strcmp (addr_space, ""1"") == 0) return NVPTX_ADDR_SPACE_LOCAL; else if (strcmp (addr_space, ""2"") == 0) return NVPTX_ADDR_SPACE_SHARED; else if (strcmp (addr_space, ""3"") == 0) return NVPTX_ADDR_SPACE_CONSTANT; else return NVPTX_ADDR_SPACE_UNKNOWN; } } return NVPTX_ADDR_SPACE_UNKNOWN; }" GCC,nvptx,26,"void nvptx_asm_declare_constant_name(const char *name, const char *exp, size_t size, FILE *file) { fprintf(file, ""\t.const .align %d .u%d "", size, 1); fprintf(file, ""[%s]"", name); fprintf(file, "" = ""); fprintf(file, ""%s"", exp); fprintf(file, ""\n""); }" GCC,nvptx,27,void nvptx_assemble_decl_end(tree decl) { if (decl->decl_kind == DECL_VAR) { if (decl->initializer) { output_initializer(decl->initializer); } output_decl(decl); } else if (decl->decl_kind == DECL_FUNCTION) { if (decl->body) { output_function_body(decl->body); } output_decl(decl); } output_target_specific_value(decl); } GCC,nvptx,28,"nvptx_assemble_integer (rtx x, unsigned int size, int aligned_p) { if (size == 0) return; if (GET_CODE (x) == CONST_INT) { if (INTVAL (x) == 0) { fputs (""\t.long\t0"", asm_out_file); return; } else { fprintf (asm_out_file, ""\t.long\t%ld"", (long) INTVAL (x)); return; } }" GCC,nvptx,29," FILE *file) { if (TREE_CODE (decl) == VAR_DECL) { fprintf (file, ""// BEGIN%s VAR DECL: %s\n"", name, IDENTIFIER_POINTER (DECL_NAME (decl))); fprintf (file, "".extern %s .b8 %s[];\n\n"", IDENTIFIER_POINTER (DECL_NAME (decl)), IDENTIFIER_POINTER (DECL_NAME (decl))); } }" GCC,nvptx,30,"void nvptx_assemble_value(uint64_t value, size_t size) { uint64_t chunk = 0; size_t i; for (i = 0; i < size; i++) { chunk = (chunk << 8) | (value & 0xff); value >>= 8; if (chunk == NVPTX_TARGET_SPECIFIC_VALUE) { break; } } if (i == size) { emit_value(value, size); } else { emit_value(chunk, i); } }" GCC,nvptx,31,"nvptx_call_args (void *gcc_data, void *user_data) { struct gcc_target *target = (struct gcc_target *)gcc_data; struct nvptx_call_args_data *data = (struct nvptx_call_args_data *)user_data; data->arg_types[data->num_args] = target->type; data->arg_values[data->num_args] = target->value; data->num_args++; }" GCC,nvptx,32,"nvptx_declare_object_name (FILE *file, const char *name, int align, int size) { fprintf (file, ""\t%s%s .align %d .u%d "", ASM_COMMENT_START, name, align, size); }" GCC,nvptx,33, GCC,nvptx,34, GCC,nvptx,35,"void nvptx_file_end (void) { for (int i = 0; i < nvptx_function_declarations.size(); i++) { const char *name = nvptx_function_declarations[i].name; const char *type = nvptx_function_declarations[i].type; const char *parameters = nvptx_function_declarations[i].parameters; fprintf(nvptx_output_file, ""extern %s %s(%s);\n"", type, name, parameters); } }" GCC,nvptx,36,"void nvptx_file_start(void) { fprintf(asm_out_file, ""// BEGIN PREAMBLE\n""); fprintf(asm_out_file, ""\t.version\t3.1\n""); fprintf(asm_out_file, ""\t.target\tsm_30\n""); 
fprintf(asm_out_file, ""\t.address_size %d\n"", TARGET_ADDRESS_SIZE); fprintf(asm_out_file, ""// END PREAMBLE\n""); }" GCC,nvptx,37,"nvptx_function_arg (tree decl, tree type, int regno) { if (TREE_CODE (type) == REAL_TYPE) { regno = NVPTX_FPR_REGNO (regno); } else if (TREE_CODE (type) == INTEGER_TYPE) { regno = NVPTX_GPR_REGNO (regno); } else { regno = NVPTX_STACK_REGNO (regno); } DECL_REG (decl) = regno; }" GCC,nvptx,38, GCC,nvptx,39," unsigned int boundary) { if (mode == BLKmode) return 2; else return boundary; } static unsigned int nvptx_function_arg_padding (unsigned int mode, unsigned int boundary) { if (mode == BLKmode) return 2; else return boundary; } static bool nvptx_function_value_regno_p (unsigned int regno) { return regno < 32; }" GCC,nvptx,40,"void nvptx_function_end(FILE *file) { fprintf(file, ""}\n""); }" GCC,nvptx,41,"nvptx_function_incoming_arg (tree decl, tree arg, int incoming_reg) { if (TREE_CODE (arg) == PARM_DECL) { int reg = nvptx_function_arg_reg (decl, arg); if (reg != -1) { incoming_reg = reg; } } DECL_FUNCTION_INCOMING_ARG (decl, arg, incoming_reg); }" GCC,nvptx,42,"nvptx_function_value (tree type, tree func, bool outgoing) { return targetm.calls.function_value (type, func, outgoing); } static bool nvptx_function_value_regno_p (const unsigned int regno) { return (regno == 0 || regno == 1); } static rtx nvptx_function_value (tree type, tree func, bool outgoing) { return targetm.calls.function_value (type, func, outgoing); }" GCC,nvptx,43, GCC,nvptx,44,"nvptx_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode) { if (regno == RETURN_REGNUM) return mode == nvptx_return_mode; return true; }" GCC,nvptx,45,nvptx_init_machine_status (struct machine_function *mach) { mach = XCNEW (struct machine_function); mach->target = TARGET_NVPTX; mach->init_machine_status (); } GCC,nvptx,46," rtx *loc) { rtx call_insn; rtx libcall_func; rtx libcall_args; rtx libcall_ret; libcall_func = XEXP (op, 0); libcall_args = XEXP (op, 1); call_insn = emit_call_insn (gen_rtx_CALL (VOIDmode, libcall_func, libcall_args)); libcall_ret = gen_rtx_REG (mode, REGNO (XEXP (call_insn, 0))); *loc = libcall_ret; }" GCC,nvptx,47,"nvptx_maybe_convert_symbolic_operand (rtx orig_op) { if (GET_CODE (orig_op) == SYMBOL_REF) { if (SYMBOL_REF_DECL (orig_op) != NULL && TREE_CODE (SYMBOL_REF_DECL (orig_op)) == FUNCTION_DECL) nvptx_record_needed_fndecl (SYMBOL_REF_DECL (orig_op)); if (SYMBOL_REF_ADDR_SPACE (orig_op) == ADDR_SPACE_GENERIC) { rtx new_op = gen_rtx_SYMBOL_REF (VOIDmode, XSTR (orig_op, 0), SYMBOL_REF_ADDR_SPACE (orig_op)); return new_op; } } return orig_op; }" GCC,nvptx,48,"nvptx_option_override (void) { if (TARGET_NVPTX) { target_option_override (TARGET_NVPTX, 1, 0, 17, 17, 17); } }" GCC,nvptx,49,"void nvptx_output_return(struct gcc_target_nvptx *target, struct gcc_target_nvptx_out_return *out_return) { fprintf(target->asm_out_file, ""\tret;\n""); fprintf(target->asm_out_file, ""\tst.param%s\t[%%out_retval], %%retval;\n"", out_return->out_retval); }" GCC,nvptx,50,"nvptx_output_skip (FILE *file, int size) { if (size > 0) { fprintf (file, ""\t.zero %d\n"", size); } }" GCC,nvptx,51, GCC,nvptx,52,"void nvptx_print_address_operand (FILE *file, rtx x) { if (GET_CODE (x) == MEM) { rtx addr = XEXP (x, 0); if (GET_CODE (addr) == REG) { fprintf (file, ""%%%s"", reg_names[REGNO (addr)]); } else if (GET_CODE (addr) == PLUS) { rtx op0 = XEXP (addr, 0); rtx op1 = XEXP (addr, 1); if (GET_CODE (op0) == REG) { fprintf (file, ""%%%s"", reg_names[REGNO (op0)]); } else if (GET_CODE (op0) == CONST_INT) { fprintf 
(file, ""%d"", INTVAL (op0)); } else { fprintf (file, ""?""); } if (GET_CODE (op1) == REG) { fprintf (file, "" + %%%s"", reg_names[REGNO (op1)]); } else if (GET_CODE (op1) == CONST_INT) { fprintf (file, "" + %d"", INTVAL (op1)); } else { fprintf (file, "" + ?""); } } else { fprintf (file, ""?""); } } else { fprintf (file, ""?""); } }" GCC,nvptx,53,"void nvptx_print_operand_address(FILE *file, rtx addr) { if (REG_P(addr)) { fprintf(file, ""%s"", reg_names[REGNO(addr)]); } else { fprintf(file, ""0x%x"", (unsigned int) addr); } }" GCC,nvptx,54, GCC,nvptx,55, GCC,nvptx,56,"void nvptx_record_fndecl(tree decl) { if (TREE_CODE(decl) != FUNCTION_DECL) return; const char *name = IDENTIFIER_POINTER(DECL_NAME(decl)); if (htab_find(fndecl_hash, name) != NULL) return; fndecl_entry *entry = htab_find_slot(fndecl_hash, name, INSERT); if (entry == NULL) return; entry->name = name; entry->prototype = nvptx_prototype(decl); htab_insert(fndecl_hash, entry); }" GCC,nvptx,57,"nvptx_record_needed_fndecl (tree decl) { if (!decl->decl_arguments) { decl->decl_arguments = NULL; return; } if (decl->decl_arguments) { nvptx_emit_decl (decl); } } ``` This function takes a `tree` argument named `decl`, which represents the declaration of a function or variable. It checks if the declaration has any argument information, and if it does, it emits a ptx decl for the declaration using the `nvptx_emit_decl` function. If the declaration does not have any argument information, it records the decl for later. You can use this function in the nvptx backend of the GCC Compiler by calling it whenever you need to record or emit a ptx decl for a function or variable. For example, you can call it in the `nvptx_finish_decl` function, which is called after a declaration has been processed, like this: ``` void nvptx_finish_decl (tree decl) { nvptx_record_needed_fndecl (decl); nvptx_emit_decl (decl); }" GCC,nvptx,58, GCC,nvptx,59, GCC,nvptx,60,nvptx_section_for_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL) { if (DECL_INITIAL (decl) && !DECL_EXTERNAL (decl)) return NVPTX_SECTION_KIND_CONST; else return NVPTX_SECTION_KIND_GLOBAL; } else if (TREE_CODE (decl) == FUNCTION_DECL) { if (DECL_INITIAL (decl) && !DECL_EXTERNAL (decl)) return NVPTX_SECTION_KIND_CONST; else return NVPTX_SECTION_KIND_GLOBAL; } else { return NVPTX_SECTION_KIND_NONE; } } GCC,nvptx,61, GCC,nvptx,62, GCC,nvptx,63,"nvptx_static_chain (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL) { if (DECL_STATIC_CHAIN (decl)) { tree chain = DECL_STATIC_CHAIN (decl); tree_chain_add (chain, decl); } } } ``` This function is called by the GCC backend to implement the `TARGET_STATIC_CHAIN` hook for the nvptx target. It checks if the given `decl` is a function declaration, and if it has a static chain. If it does, it adds the static chain to the list of static chains for the current compilation unit. You can also add a `nvptx_static_chain_init` function to initialize the list of static chains for the current compilation unit. 
``` static void nvptx_static_chain_init (void) { tree_chain_init (); }" GCC,nvptx,64, GCC,nvptx,65,"void nvptx_write_function_decl(tree decl, const char *name, int main, int extern, int visible, int entry, int func, int param, int out_retval, int in_ar1, int in_argp, int argc, int argv) { fprintf(asm_out_file, "".extern %s\n"", name); fprintf(asm_out_file, "".visible %s\n"", name); fprintf(asm_out_file, "".entry %s\n"", name); fprintf(asm_out_file, "".func %s\n"", name); fprintf(asm_out_file, ""(.param %s)\n"", out_retval); fprintf(asm_out_file, ""(.param.u %s)\n"", in_ar1); fprintf(asm_out_file, ""(.param.u %s)\n"", in_argp); fprintf(asm_out_file, ""(.reg.u %s)\n"", argc); fprintf(asm_out_file, ""(.param.u %s)\n"", argv); }" GCC,nvptx,66,"output_decl_chunk (tree decl, rtx x) { if (TREE_CODE (decl) == FUNCTION_DECL) { fprintf (asm_out_file, ""%s"", IDENTIFIER_POINTER (DECL_NAME (decl))); tree parm_decl; for (parm_decl = DECL_ARGUMENTS (decl); parm_decl; parm_decl = TREE_CHAIN (parm_decl)) { fprintf (asm_out_file, "" %s"", IDENTIFIER_POINTER (DECL_NAME (parm_decl))); } fprintf (asm_out_file, "" %s"", IDENTIFIER_POINTER (DECL_NAME (DECL_INITIAL (decl)))); } else { fprintf (asm_out_file, ""%s"", IDENTIFIER_POINTER (DECL_NAME (decl))); fprintf (asm_out_file, "" %s"", IDENTIFIER_POINTER (DECL_NAME (DECL_INITIAL (decl)))); }" GCC,nvptx,67,"void walk_args_for_param(tree argtypes, tree args, FILE *file, bool write_copy, bool return_in_mem) { if (argtypes == NULL) { argtypes = args; } int arg_num = 0; tree arg; FOR_EACH_VEC_ELT (argtypes, i, arg) { if (write_copy) { fprintf(file, ""\t.reg %s %%ar%d;\n"", arg->name, arg_num); } else { fprintf(file, ""\tld.param %s %%ar%d, [%%in_ar%d];\n"", arg->name, arg_num, arg_num); } arg_num++; } }" GCC,nvptx,68,"bool write_as_kernel(const char *attrs) { if (strstr(attrs, ""nvptx"") != NULL) { if (strstr(attrs, ""kernel"") != NULL || strstr(attrs, ""omp target entrypoint"") != NULL) { return true; } } return false; }" GCC,nvptx,69,"void write_function_decl_and_comment(FILE *s, tree decl, const char *name) { if (TREE_CODE(decl) == FUNCTION_DECL) { fprintf(s, "".func %s\n"", name); fprintf(s, ""// BEGIN %s\n"", name); fprintf(s, ""// GLOBAL %s\n"", name); fprintf(s, ""// FUNCTION DECL: %s\n"", name); fprintf(s, ""0, 1\n""); fprintf(s, "";\n""); } }" GCC,nvptx,70,"void write_func_decl_from_insn(struct gcc_insn *insn) { if (insn->opcode == CALL_INSN && insn->operands[0].type == OP_INDIRECT) { fprintf(asm_out_file, ""\t.callprototype\t""); fprintf(asm_out_file, ""\t.extern .func %s ("", insn->operands[0].name); for (int i = 1; i < insn->num_operands; i++) { fprintf(asm_out_file, ""%s%s"", i > 1 ? "", "" : """", insn->operands[i].name); } fprintf(asm_out_file, "")""); fprintf(asm_out_file, ""\n""); } else if (insn->opcode == CALL_INSN && insn->operands[0].type == OP_LIBCALL) { fprintf(asm_out_file, ""\t.extern .func %s ("", insn->operands[0].name); for (int i = 1; i < insn->num_operands; i++) { fprintf(asm_out_file, ""%s%s"", i > 1 ? 
"", "" : """", insn->operands[i].name); } fprintf(asm_out_file, "")""); fprintf(asm_out_file, ""\n""); } }" GCC,nvptx,71,"void write_one_arg(tree type, int i, enum machine_mode mode, bool no_arg_types) { if (no_arg_types) { fprintf(stderr, ""Error: write_one_arg called with no argument types\n""); return; } if (i == 0) { fprintf(stderr, ""Error: write_one_arg called with invalid argument index\n""); return; } if (mode == VOIDmode) { fprintf(stderr, ""Error: write_one_arg called with void mode\n""); return; } if (type == NULL) { fprintf(stderr, ""Error: write_one_arg called with null type\n""); return; } if (type->kind != TYPE_ARG_TYPES) { fprintf(stderr, ""Error: write_one_arg called with non-argument type\n""); return; } if (type->type == NULL) { fprintf(stderr, ""Error: write_one_arg called with null type\n""); return; } if (type->type->kind != TYPE_ARG_TYPES) { fprintf(stderr, ""Error: write_one_arg called with non-argument type\n""); return; } if (type->type->type == NULL) { fprintf(stderr, ""Error: write_one_arg called with null type\n""); return; } if (type->type->type->kind != TYPE_ARG_TYPES) { fprintf(stderr, ""Error: write_one_arg called with non-argument type\n""); return; }" GCC,nvptx,72," tree type, unsigned int size, unsigned int align) { fprintf(asm_out_file, ""%s .align %d .u%d "", name, align, size); fprintf(asm_out_file, ""%s %u"", type, size); fprintf(asm_out_file, "" .align %d"", align); fprintf(asm_out_file, "" = {""); }" GCC,nvptx,73,"extern void nvptx_assemble_undefined_decl (tree decl, const char *name, FILE *file) { if (TREE_CODE (decl) == VAR_DECL) { fprintf (file, ""\t.extern %s\n"", name); } else if (TREE_CODE (decl) == FUNCTION_DECL) { fprintf (file, ""\t.extern %s\n"", name); } else { error (""unsupported declaration type for %s"", name); } }" GCC,nvptx,74,"void nvptx_assemble_value(uint64_t val, size_t size) { uint64_t chunk = 0; size_t i = 0; while (i < size) { chunk = (val >> (i * 8)) & 0xff; nvptx_emit_byte(chunk); i++; } } void nvptx_emit_byte(uint8_t byte) { }" GCC,nvptx,75,"nvptx_declare_function_name (tree decl, int local_regs) { fprintf (asm_out_file, ""\t.reg%s "", decl->name); if (local_regs) fprintf (asm_out_file, "";\n""); else fprintf (asm_out_file, "";\n""); fprintf (asm_out_file, ""\t.func%s "", decl->name); tree arg_chain = DECL_ARGUMENTS (decl); while (arg_chain) { tree arg = TREE_VALUE (arg_chain); fprintf (asm_out_file, ""\t.arg%s "", arg->name); arg_chain = TREE_CHAIN (arg_chain); } fprintf (asm_out_file, ""\t.body%s "", decl->name); fprintf (asm_out_file, ""\t.end%s "", decl->name); }" GCC,nvptx,76,uint32_t nvptx_dim_limit(void) { uint32_t max_dim_size = 0; #ifdef __NVPTX__ max_dim_size = 1024; #endif return max_dim_size; } GCC,nvptx,77,"nvptx_emit_forking (rtx_insn *insn, rtx_insn *mask) { rtx_insn *fork_insn; rtx_insn *join_insn; fork_insn = gen_rtx_INSN (CODE_LABEL, NULL_RTX, NULL_RTX); join_insn = gen_rtx_INSN (CODE_LABEL, NULL_RTX, NULL_RTX); emit_insn (fork_insn); emit_insn (join_insn); set_insn_target (mask, fork_insn); set_insn_target (mask, join_insn); }" GCC,nvptx,78,"nvptx_emit_joining (rtx_insn *insn, rtx_insn *next_insn) { rtx_insn *join_insn = gen_rtx_INSN (CODE_FOR_nvptx_join, insn->operands[0], insn->operands[1], insn->operands[2]); join_insn->operands[0] = insn->operands[0]; join_insn->operands[1] = insn->operands[1]; join_insn->operands[2] = insn->operands[2]; emit_insn (join_insn); next_insn = join_insn; }" GCC,nvptx,79,"nvptx_encode_section_info (tree decl, const char *section_name) { if (TREE_CODE (decl) == VAR_DECL) { 
if (DECL_EXTERNAL (decl)) section_name = "".data""; else section_name = "".bss""; } else if (TREE_CODE (decl) == FUNCTION_DECL) { if (DECL_EXTERNAL (decl)) section_name = "".text""; else section_name = "".data""; } else { section_name = "".data""; } DECL_SECTION_NAME (decl) = build_string (strlen (section_name), section_name); DECL_TARGET_SPECIFIC_VALUES (decl) = tree_cons (NULL_TREE, build_string (strlen (section_name), section_name), DECL_TARGET_SPECIFIC_VALUES (decl)); }" GCC,nvptx,80, GCC,nvptx,81, GCC,nvptx,82,"static void nvptx_expand_worker_addr(void *addr, int n) { int i; for (i = 0; i < n; i++) { addr += 16; } } ``` This function takes two arguments: `addr`, which is a pointer to the worker reduction address, and `n`, which is the number of times the worker reduction address should be expanded. The function uses a loop to iterate over the worker reduction address `n` times, and each time it adds 16 to the address. This is because the worker reduction address is 16 bytes long, and the function needs to expand it by 16 bytes for each iteration. The function does not return any value, but it modifies the `addr` pointer to point to the expanded worker reduction address. You can use this function in your GCC Compiler code to expand the worker reduction address for the nvptx backend. For example: ``` int main() { void *addr = ...; int n = ...; nvptx_expand_worker_addr(addr, n); return 0; }" GCC,nvptx,83,"void nvptx_file_end (void) { for (int i = 0; i < nvptx_function_declarations.size(); i++) { const char *name = nvptx_function_declarations[i].name; const char *type = nvptx_function_declarations[i].type; const char *parameters = nvptx_function_declarations[i].parameters; fprintf(nvptx_output_file, ""extern %s %s(%s);\n"", type, name, parameters); } }" GCC,nvptx,84,"nvptx_find_par (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; int i; if (t == NULL_TREE) return NULL_TREE; if (TREE_CODE (t) == PARM_DECL) { return t; }" GCC,nvptx,85,"nvptx_function_arg (tree decl, tree type, int regno) { if (TREE_CODE (type) == REAL_TYPE) { regno = NVPTX_FPR_REGNO (regno); } else if (TREE_CODE (type) == INTEGER_TYPE) { regno = NVPTX_GPR_REGNO (regno); } else { regno = NVPTX_STACK_REGNO (regno); } DECL_REG (decl) = regno; }" GCC,nvptx,86,"nvptx_function_arg_advance (struct function *fun, struct function_arg_info *arg) { if (arg->mode == VOIDmode) return; if (arg->mode == BLKmode) { arg->size = (arg->size + 7) & ~7; }" GCC,nvptx,87,"nvptx_function_incoming_arg (tree decl, tree arg) { if (POINTER_TYPE_P (TREE_TYPE (arg))) { tree pointer_type = TREE_TYPE (arg); unsigned int pointer_align = TYPE_ALIGN (pointer_type); unsigned int arg_align = TYPE_ALIGN (TREE_TYPE (arg)); if (pointer_align > arg_align) { TREE_TYPE (arg) = build_aligned_type (TREE_TYPE (arg), pointer_align); } }" GCC,nvptx,88,"nvptx_function_value (tree type, tree func) { if (TYPE_MODE (type) == BLKmode) return gen_rtx_MEM (BLKmode, gen_rtx_REG (Pmode, 0)); return gen_rtx_REG (TYPE_MODE (type), 0); }" GCC,nvptx,89,"nvptx_gen_pack (rtx dest, rtx src1, rtx src2) { rtx tmp; tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, src1, src2)); emit_insn (gen_rtx_SET (VOIDmode, dest, tmp)); }" GCC,nvptx,90,"nvptx_gen_shuffle (rtx reg, rtx dest, int n_lanes) { int i; for (i = 0; i < n_lanes; i++) { rtx shuffle_reg = gen_rtx_REG (reg->mode, i); emit_insn (gen_rtx_SET (VOIDmode, shuffle_reg, reg)); } }" GCC,nvptx,91,"nvptx_gen_unpack (rtx dest, rtx src, int mode) { rtx dest_lo = gen_reg_rtx (SImode); rtx dest_hi = gen_reg_rtx (SImode); rtx src_lo = 
gen_reg_rtx (SImode); rtx src_hi = gen_reg_rtx (SImode); emit_move_insn (src_lo, gen_rtx_MEM (SImode, src)); emit_move_insn (src_hi, gen_rtx_MEM (SImode, src)); emit_insn (gen_rtx_SET (VOIDmode, dest_lo, gen_rtx_ZERO_EXTEND (SImode, src_lo))); emit_insn (gen_rtx_SET (VOIDmode, dest_hi, gen_rtx_ZERO_EXTEND (SImode, src_hi))); emit_move_insn (dest, gen_rtx_MEM (SImode, dest_lo)); emit_move_insn (dest, gen_rtx_MEM (SImode, dest_hi)); }" GCC,nvptx,92,"static void nvptx_gen_vcast(rtx reg, rtx val) { rtx broadcast_reg = gen_rtx_REG(reg->mode, reg->regno); rtx broadcast_val = gen_rtx_REG(val->mode, val->regno); emit_insn(gen_rtx_SET(broadcast_reg, broadcast_val)); }" GCC,nvptx,93,"void nvptx_gen_wcast(int pm, int rep, int reg) { switch (pm) { case 0: break; case 1: break; default: break; } if (rep > 0) { } }" GCC,nvptx,94,"nvptx_get_drap_rtx (void) { return gen_rtx_REG (Pmode, DRAP_REGNUM); }" GCC,nvptx,95,"void nvptx_get_worker_red_addr(int offset, int type) { asm volatile(""ld.param.u64 %0, [%1 + %2];"" : ""=r""(offset) : ""r""(nvptx_get_worker_red_addr), ""r""(offset) : ""memory""); return offset; }" GCC,nvptx,96,"void nvptx_goacc_reduction(int64_t *dest, int64_t *src, int64_t n) { int64_t i; for (i = 0; i < n; i++) { dest[i] += src[i]; } } ``` This function takes three arguments: * `dest`: A pointer to the destination array where the reduced values will be stored. * `src`: A pointer to the source array that contains the values to be reduced. * `n`: The number of elements in the source array. The function uses a simple loop to iterate over the elements in the source array and add them to the corresponding elements in the destination array. To use this function with the NVPTX backend of GCC, you would need to compile it with the `-march=nvptx` flag to enable the NVPTX instruction set. You can then use the function in your code as follows: ``` #include int main() { int64_t dest[10]; int64_t src[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; nvptx_goacc_reduction(dest, src, 10); return 0; }" GCC,nvptx,97,void nvptx_goacc_reduction_fini(void) { int target_specific = 1; __builtin_goacc_set_target_specific(target_specific); __builtin_goacc_reduction_fini(); } GCC,nvptx,98, GCC,nvptx,99,"nvptx_goacc_reduction_setup (void) { nvptx_target_specific_values = { .nvptx_target_specific_value_1 = 2, .nvptx_target_specific_value_2 = 3, .nvptx_target_specific_value_3 = 1, .nvptx_target_specific_value_4 = 5, .nvptx_target_specific_value_5 = 1 }; goacc_reduction_setup_func = nvptx_goacc_reduction_setup; }" GCC,nvptx,100,"void nvptx_goacc_reduction_teardown(uint32_t *target_specific_value) { } ``` In this example, the `nvptx_goacc_reduction_teardown` function takes a single argument, `target_specific_value`, which is a pointer to a `uint32_t` value that contains the target-specific value for the GOACC_REDUCTION_TEARDOWN operation. The function implementation would contain the logic for performing the GOACC_REDUCTION_TEARDOWN operation on the NVPTX backend of GCC. This could involve using the NVPTX intrinsics to perform the necessary operations on the target-specific value. 
For example, if the target-specific value is a 32-bit integer, you could use the `nvptx_add` intrinsic to perform the addition operation: ``` #include void nvptx_goacc_reduction_teardown(uint32_t *target_specific_value) { uint32_t result = nvptx_add(*target_specific_value, 1); *target_specific_value = result; }" GCC,nvptx,101," do { \ if (decl == NULL) { \ vector_length = 1; \ num_workers = 1; \ } else { \ if (fn_level < 0) { \ vector_length = 1; \ num_workers = 1; \ } else { \ vector_length = decl->vector_length; \ num_workers = decl->num_workers; \ } \ } \ if (vector_length < 1) { \ vector_length = 1; \ } \ if (num_workers < 1) { \ num_workers = 1; \ } \ } while (0) int main() { int vector_length = 0; int num_workers = 0; NVPTX_GOACC_VALIDATE_DIMS(NULL, -1, vector_length, num_workers); printf(""vector_length: %d, num_workers: %d\n"", vector_length, num_workers); return 0; }" GCC,nvptx,102,"void nvptx_init_axis_predicate(int regno, int axis) { asm volatile ( ""\t.reg.u32\t%%%s;\n"" ""\tmov.u32\t%%%s, %%tid.%s;\n"" ""\tsetp.ne.u32\t%%r%d, %%%s, 0;\n"" : ""=r"" (regno) : ""r"" (axis) : ""memory"" ); }" GCC,nvptx,103,void nvptx_init_builtins(void) { __builtin_nvptx_shuffle = __builtin_nvptx_shuffle_impl; __builtin_nvptx_shufflell = __builtin_nvptx_shufflell_impl; __builtin_nvptx_worker_addr = __builtin_nvptx_worker_addr_impl; __builtin_nvptx_cmp_swap = __builtin_nvptx_cmp_swap_impl; __builtin_nvptx_cmp_swapll = __builtin_nvptx_cmp_swapll_impl; } GCC,nvptx,104," rtx *loc) { rtx call_insn; rtx libcall_func; rtx libcall_args; rtx libcall_ret; libcall_func = XEXP (op, 0); libcall_args = XEXP (op, 1); call_insn = emit_call_insn (gen_rtx_CALL (VOIDmode, libcall_func, libcall_args)); libcall_ret = gen_rtx_REG (mode, REGNO (XEXP (call_insn, 0))); *loc = libcall_ret; }" GCC,nvptx,105, GCC,nvptx,106, GCC,nvptx,107,"nvptx_option_override (void) { targetm.target_option_override = nvptx_target_option_override; targetm.nvptx_worker_bcast = ""__worker_bcast""; targetm.nvptx_worker_red = ""__worker_red""; } static void nvptx_target_option_override (void) { targetm.nvptx_worker_bcast = ""__worker_bcast""; targetm.nvptx_worker_red = ""__worker_red""; }" GCC,nvptx,108,"nvptx_output_mov_insn (rtx_insn *insn) { rtx operands[3]; int noperands; int i; noperands = 0; for (i = 0; i < 3; i++) { operands[i] = NULL_RTX; if (INSN_CODE (insn) == CODE_FOR_movsi) { operands[i] = XEXP (insn, i); noperands++; } else if (INSN_CODE (insn) == CODE_FOR_movdi) { operands[i] = XEXP (insn, i); noperands++; }" GCC,nvptx,109,"void nvptx_output_skip(size_t size) { asm_out_file->write(NULL, size); }" GCC,nvptx,110, GCC,nvptx,111, GCC,nvptx,112,"void nvptx_record_offload_symbol(const char *symbol, int type, void *addr) { if (type == 0) { printf(""//:FUNC_MAP \""%s\"""", symbol); } else { printf(""//:VAR_MAP \""%s\"""", symbol); } printf("", %#x\n"", addr); }" GCC,nvptx,113,"nvptx_reorg (void) { rtx insn, set; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { set = single_set (insn); if (set && GET_CODE (set) == SUBREG) { rtx new_reg = gen_reg_rtx (GET_MODE (SUBREG_REG (set))); rtx copyin = gen_rtx_COPY (GET_MODE (SUBREG_REG (set)), SUBREG_REG (set)); rtx copyout = gen_rtx_COPY (GET_MODE (SUBREG_REG (set)), new_reg); emit_insn_before (copyin, insn); emit_insn_before (copyout, insn); SUBREG_REG (set) = new_reg; } } } }" GCC,nvptx,114,"void nvptx_sese_number(struct nvptx_basic_block *bb, struct nvptx_basic_block *parent, int orientation) { if (bb->visited) { return; } bb->visited = true; if (parent) { 
nvptx_sese_number(parent, bb, orientation); } bb->number = 3 * bb->number + orientation; if (bb->children) { nvptx_sese_number(bb->children, bb, orientation); } }" GCC,nvptx,115, GCC,nvptx,116,"nvptx_wpropagate (rtx_insn *insn, bool pre_p, bool is_call) { if (pre_p) { nvptx_spill_live_state (insn); } else { nvptx_fill_live_state (insn); } emit_insn_after (insn, gen_sequence ()); return nvptx_propagate (insn, pre_p, is_call); }" GCC,nvptx,117,"nvptx_wsync (void) { asm volatile (""wsync"" ::: ""memory""); }" GCC,nvptx,118,"void output_init_frag(struct frag *frag, int sym, int addend) { if (frag->fr_type == FR_INIT) { fprintf(frag->fr_file, ""%s"", frag->fr_string); if (sym) { fprintf(frag->fr_file, "" %s + %d"", sym->name, addend); } fprintf(frag->fr_file, ""\n""); } }" GCC,nvptx,119,"void vprop_gen(uint32_t *dst, uint32_t *src, uint32_t n) { uint32_t i; for (i = 0; i < n; i++) { dst[i] = src[i] & 0x1; } }" GCC,nvptx,120,"static void wprop_gen(void) { } ``` This function is a placeholder for the `nvptx_wpropagate` worker, which is used to propagate information about the nvptx backend of GCC Compiler. The `nvptx,1,1` target specific value is used to indicate that this function is for the nvptx backend of GCC Compiler. You can use this function as a placeholder in your code, and then implement the actual functionality of the `nvptx_wpropagate` worker in a separate file. For example, you could create a file named `nvptx_wpropagate.c` that contains the implementation of the `nvptx_wpropagate` worker, and then include that file in your code using the `#include` directive. Here is an example of how you could use the `wprop_gen` function in your code: ``` #include ""nvptx_wpropagate.c"" int main(void) { wprop_gen(); return 0; }" GCC,nvptx,121,"void write_fn_proto(FILE *s, tree decl, const char *name) { if (decl->common.code == NVPTX_FUNCTION_DECL) { fprintf(s, "".func %s\n"", name); fprintf(s, "".weak %s\n"", name); fprintf(s, "".visible %s\n"", name); fprintf(s, "".entry %s\n"", name); fprintf(s, "".func %s\n"", name); fprintf(s, ""0,1,1,1,1\n""); fprintf(s, ""main\n""); fprintf(s, ""0,1,1,1\n""); fprintf(s, "";\n""); } }" GCC,nvptx,122,"void write_worker_buffer(int alignment, char* name, int size) { fprintf(stderr, ""nvptx,0,.shared .align %d .u8 %s[%d];\n"", alignment, name, size); }" GCC,nvptx,123,"void diagnose_openacc_conflict(const char *optname, int optval) { if (optval && !strcmp(optname, ""fopenacc"")) { error(""option %s is not supported together with -fopenacc"", optname); } } ``` This function takes two arguments: `optname`, which is the name of the option being checked, and `optval`, which is the value of the option. The function first checks if the `optval` is non-zero, indicating that the option is enabled. If the option is enabled, the function checks if the `optname` is equal to ""fopenacc"". If it is, the function issues a diagnostic error message indicating that the option is not supported together with `-fopenacc`. Note that the `error` function is used to issue the diagnostic error message. This function is defined in the `gcc-diagnostic.h` header file, which is included in the `gcc-diagnostic.c` source file. Here's an example of how the `diagnose_openacc_conflict` function could be used: ```c int main(int argc, char **argv) { parse_options(argc, argv); diagnose_openacc_conflict(""fopenacc"", get_option_value(""fopenacc"")); ... 
}" GCC,nvptx,124,"extern void nvptx_assemble_undefined_decl (tree decl, const char *name, FILE *file) { if (TREE_CODE (decl) == VAR_DECL) { fprintf (file, ""\t.extern %s\n"", name); } else if (TREE_CODE (decl) == FUNCTION_DECL) { fprintf (file, ""\t.extern %s\n"", name); } else { fprintf (file, ""\t.extern %s\n"", name); } }" GCC,nvptx,125,"void nvptx_file_end(void) { fprintf(asm_out_file, "".extern .shared .u%d __nvptx_stacks[32];\n"", 32); fprintf(asm_out_file, "".extern .shared .u32 __nvptx_uni[32];\n""); }" GCC,nvptx,126,"void nvptx_function_end(FILE *file, const char *outargs, const char *hfp, const char *punbuffer, const char *retval, const char *retval_in, const char *hr6, const char *hr7, const char *hr8, const char *hr9, const char *hr10, const char *hr11, const char *hr12, const char *hr13, const char *argp, const char *frame) { fprintf(file, ""}""); }" GCC,nvptx,127,"nvptx_gen_shuffle (rtx reg, rtx dest, int n_lanes) { int i; for (i = 0; i < n_lanes; i++) { rtx shuffle_reg = gen_rtx_REG (reg->mode, i); emit_insn (gen_rtx_SET (VOIDmode, shuffle_reg, reg)); } }" GCC,nvptx,128," do { \ if (decl == NULL) { \ vector_length = 1; \ num_workers = 1; \ } else { \ if (fn_level < 0) { \ vector_length = 1; \ num_workers = 1; \ } else { \ vector_length = decl->vector_length; \ num_workers = decl->num_workers; \ } \ } \ if (vector_length < 1) { \ vector_length = 1; \ } \ if (num_workers < 1) { \ num_workers = 1; \ } \ } while (0) int main() { int vector_length = 0; int num_workers = 0; NVPTX_GOACC_VALIDATE_DIMS(NULL, -1, vector_length, num_workers); printf(""vector_length: %d, num_workers: %d\n"", vector_length, num_workers); return 0; }" GCC,nvptx,129,"nvptx_option_override (void) { if (TARGET_NVPTX) { target_option_override (TARGET_NVPTX, 1, 1, 0, 0, 0, 17, 17, 17, ""__worker_bcast"", ""__worker_red"", ""-mgomp"", ""-msoft-stack"", ""-muniform-simt""); } }" GCC,nvptx,130,"void nvptx_output_softstack_switch(bool entering, int ptr, int size, int align) { if (entering) { cvta.local.u8 %r0, %simtstack_ar + %ptr; sub.u8 %r0, %r0, %size; and.u8 %r0, %r0, -%align; st.u8 [%r0 + -%size], %ptr; } else { sub.u8 %r0, %ptr, %size; ld.u8 %ptr, [%r0 + -%size]; } }" GCC,nvptx,131,"nvptx_reorg (void) { rtx insn, set; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { set = single_set (insn); if (set && GET_CODE (set) == SUBREG) { rtx new_reg = gen_reg_rtx (GET_MODE (SUBREG_REG (set))); rtx copyin = gen_rtx_COPY (GET_MODE (SUBREG_REG (set)), SUBREG_REG (set)); rtx copyout = gen_rtx_COPY (GET_MODE (SUBREG_REG (set)), new_reg); emit_insn_before (copyin, insn); emit_insn_before (copyout, insn); SUBREG_REG (set) = new_reg; } } } }" GCC,nvptx,132, GCC,nvptx,133,"nvptx_find_par (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; int i; if (t == NULL_TREE) return NULL_TREE; if (TREE_CODE (t) == PARM_DECL) { return t; }" GCC,nvptx,134,"void nvptx_gen_wcast(int pm, int rep, int reg) { switch (pm) { case 0: break; case 1: break; default: break; } if (rep > 0) { } }" GCC,nvptx,135, GCC,nvptx,136, GCC,nvptx,137,"nvptx_process_pars (tree par, unsigned int mask) { if (TREE_CODE (par) == PARALLEL) { tree inner_par = TREE_OPERAND (par, 0); if (inner_par) mask |= nvptx_process_pars (inner_par, mask); } if (TREE_CODE (par) == PARALLEL) { tree inner_par = TREE_OPERAND (par, 0); if (inner_par) { mask |= nvptx_process_pars (inner_par, mask); TREE_OPERAND (par, 0) = NULL_TREE; } } return mask; }" GCC,nvptx,138,"nvptx_reorg (void) { rtx insn, set; for (insn = get_insns (); insn; insn = NEXT_INSN 
(insn)) { if (INSN_P (insn)) { set = single_set (insn); if (set && GET_CODE (set) == SUBREG) { rtx new_reg = gen_reg_rtx (GET_MODE (SUBREG_REG (set))); rtx copyin = gen_rtx_COPY (GET_MODE (SUBREG_REG (set)), SUBREG_REG (set)); rtx copyout = gen_rtx_COPY (GET_MODE (SUBREG_REG (set)), new_reg); emit_insn_before (copyin, insn); emit_insn_before (copyout, insn); SUBREG_REG (set) = new_reg; } } } }" GCC,nvptx,139, GCC,nvptx,140,"nvptx_wpropagate (rtx_insn *insn, bool pre_p, bool is_call) { if (pre_p) { nvptx_spill_live_state (insn); } else { nvptx_fill_live_state (insn); } emit_insn_after (insn, gen_sequence ()); return nvptx_propagate (insn, pre_p, is_call); }" GCC,nvptx,141, GCC,nvptx,142,"nvptx_find_par (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; int i; if (t == NULL_TREE) return NULL_TREE; if (TREE_CODE (t) == PARM_DECL) { return t; }" GCC,nvptx,143,void append(struct node *child) { child->brackets = child->brackets + child->brackets; } GCC,nvptx,144,"bb_first_real_insn (basic_block bb) { rtx insn; FOR_BB_INSNS (bb, insn) { if (INSN_P (insn)) return insn; } return NULL_RTX; }" GCC,nvptx,145,"diagnose_openacc_conflict (const char *optname, int optval) { if (optval && flag_openacc) { error (""option %s is not supported together with %<-fopenacc%>"", optname); } }" GCC,nvptx,146, GCC,nvptx,147, GCC,nvptx,148,"rtx get_replacement(rtx r, uint32_t n) { if (r == NULL) { r = rtx_alloc(n); } else { rtx_reuse(r, n); } return r; }" GCC,nvptx,149,"uint64_t hash(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t return_type) { uint64_t hash = 0; hash = (hash << 32) | (arg1 & 0xFFFFFFFF); hash = (hash << 32) | (arg2 & 0xFFFFFFFF); hash = (hash << 32) | (arg3 & 0xFFFFFFFF); hash = (hash << 32) | (return_type & 0xFFFFFFFF); hash ^= hash >> 32; hash ^= hash >> 16; hash ^= hash >> 8; hash ^= hash >> 4; hash ^= hash >> 2; hash ^= hash >> 1; return hash; }" GCC,nvptx,150,"has_vector_partitionable_routine_calls_p (tree fndecl) { tree call_expr; tree callee_decl; FOR_EACH_CALL_EXPR (call_expr, fndecl) { callee_decl = CALL_EXPR_FN (call_expr); if (TREE_CODE (callee_decl) == FUNCTION_DECL && TREE_CODE (callee_decl) == NVPTX_FUNCTION_DECL && TREE_CODE (callee_decl) == VECTOR_PARTITIONABLE_ROUTINE_DECL) return true; } return false; }" GCC,nvptx,151,"void init_frame(int regno, int size) { if (size == 0) { asm volatile (""mov.u%d %s, 0"" : ""=r"" (regno) : ""0"" (size)); return; } asm volatile ("".local .align %d .b8 %s_ar[%u];"" : : ""i"" (size), ""i"" (regno)); asm volatile (""cvta.local.u%d %s, %s_ar"" : ""=r"" (regno) : ""0"" (size)); asm volatile (""mov.u%d %s, 0"" : ""=r"" (regno) : ""0"" (size)); }" GCC,nvptx,152,"void init_softstack_frame(void) { .reg.u32 %%fstmp0; .reg.u32 %%fstmp1; .reg.u32 %%fstmp2; mov.u32 %%fstmp0, %%tid.y; mul.u32 %%fstmp1, %%fstmp0, 64; mov.u32 %%fstmp2, __nvptx_stacks; add.u32 %%fstmp2, %%fstmp2, %%fstmp1; ld.shared.u32 %s, [%s]; sub.u32 %s, %s, %%fstmp2; and.b32 %s, %s, 0; sub.u32 %s, %s, %%fstmp2; st.shared.u32 [%s], %s; }" GCC,nvptx,153,"int maybe_split_mode(int *pmode) { int n = 1; if (*pmode == NVPTX_PMODE_SPLIT) { n = 2; } return n; } ``` This function takes a pointer to an integer as an argument, which is assumed to be the `* PMODE` value. It checks if the value is equal to `NVPTX_PMODE_SPLIT`, which is a target-specific value for the nvptx backend of GCC Compiler. If it is, the function returns 2, indicating that the pseudo should be split into two pieces. Otherwise, the function returns 1, indicating that the pseudo should not be split. 
You can use this function in your code by calling it with the appropriate argument, like this: ``` int pmode = NVPTX_PMODE_SPLIT; int n = maybe_split_mode(&pmode); if (n > 1) { } else { }" GCC,nvptx,154,"bool needs_neutering_p(insn) { if (insn->target == nvptx) { return true; } return false; } ``` This function takes an `insn` parameter, which is a pointer to an instruction in the GCC Compiler's internal representation. It checks if the target of the instruction is nvptx, and if so, returns true. Otherwise, it returns false. You can use this function in your GCC Compiler plugin to determine if an instruction needs neutering for the nvptx backend. For example: ``` if (needs_neutering_p(insn)) { }" GCC,nvptx,155,"void nvptx_asm_declare_constant_name(tree exp, const char *name, size_t size, FILE *file) { fprintf(file, ""\t.const\t%s, %s, %zu\n"", name, exp, size); }" GCC,nvptx,156,"void nvptx_assemble_decl_begin(const char *name, const char *section, tree type, unsigned int size, unsigned int align) { fprintf(asm_out_file, "".decl %s, %s, %s, %d, %d\n"", name, section, type, size, align); fprintf(asm_out_file, "".init %s, %s, %s, %d, %d\n"", name, section, type, size, align); }" GCC,nvptx,157,void nvptx_assemble_decl_end(tree decl) { if (TREE_CODE(decl) == VAR_DECL) { if (DECL_INITIAL(decl) != NULL_TREE) { nvptx_assemble_decl_init(decl); } else { nvptx_assemble_decl(decl); } } else if (TREE_CODE(decl) == FUNCTION_DECL) { nvptx_assemble_function_decl(decl); } else { nvptx_assemble_decl(decl); } nvptx_assemble_target_specific_value(decl); } void nvptx_assemble_target_specific_value(tree decl) { if (TREE_CODE(decl) == VAR_DECL) { nvptx_assemble_target_specific_value_for_var_decl(decl); } else if (TREE_CODE(decl) == FUNCTION_DECL) { nvptx_assemble_target_specific_value_for_function_decl(decl); } else { nvptx_assemble_target_specific_value_for_decl(decl); } } GCC,nvptx,158, GCC,nvptx,159,"extern void nvptx_assemble_undefined_decl (tree decl, const char *name, FILE *file) { if (TREE_CODE (decl) == VAR_DECL) { fprintf (file, ""\t.extern %s\n"", name); } else if (TREE_CODE (decl) == FUNCTION_DECL) { fprintf (file, ""\t.extern %s\n"", name); } else { fprintf (file, ""\t.extern %s\n"", name); } }" GCC,nvptx,160,"void nvptx_assemble_value(uint64_t value, size_t size) { uint64_t chunk = 0; size_t i; for (i = 0; i < size; i++) { chunk = (chunk << 8) | (value & 0xff); value >>= 8; } for (i = 0; i < size; i++) { emit_byte(chunk & 0xff); chunk >>= 8; } } void emit_byte(uint8_t byte) { }" GCC,nvptx,161,"uint32_t nvptx_builtin_decl(uint32_t code) { return NVPTX_BUILTIN(code); } ``` This function uses the `__builtin_nvptx_` prefix to define the NVPTX builtin for a given code. The `NVPTX_BUILTIN` macro is used to define the builtin for the NVPTX target, and the `nvptx_builtin_decl` function is used to return the builtin for a given code. You can use this function in your GCC compiler by including the header file that defines it, and then calling the function with the appropriate code. 
For example: ``` #include ""nvptx_builtin.h"" int main() { uint32_t code = 0x1234; uint32_t builtin = nvptx_builtin_decl(code); return 0; }" GCC,nvptx,162, GCC,nvptx,163,"nvptx_call_insn_is_syscall_p (rtx_insn *insn) { if (GET_CODE (insn) != CALL_INSN) return false; tree callee = CALL_EXPR_FN (insn); if (TREE_CODE (callee) != FUNCTION_DECL) return false; const char *callee_name = IDENTIFIER_POINTER (DECL_NAME (callee)); if (strcmp (callee_name, ""malloc"") == 0 || strcmp (callee_name, ""free"") == 0 || strcmp (callee_name, ""vprintf"") == 0) return true; return false; }" GCC,nvptx,164,nvptx_cannot_copy_insn_p (rtx_insn *insn) { if (insn->code == CODE_FOR_nvptx_insn) { for (int i = 0; i < insn->n_operands; i++) { rtx op = insn->operand[i]; if (op->code == CODE_FOR_nvptx_operand) { if (op->nvptx_operand_type == NVPTX_OPERAND_TYPE_REGISTER) { if (op->nvptx_register_type == NVPTX_REGISTER_TYPE_SPECIAL) { return true; } } } } } return false; } GCC,nvptx,165," machine_mode to) { if (!TARGET_NVPTX_CAN_CHANGE_MODE_CLASS) return false; if (!TARGET_NVPTX_MODE_SUPPORTED (mode)) return false; if (!TARGET_NVPTX_MODE_CHANGE_OK (mode, to)) return false; if (!TARGET_NVPTX_MODE_CHANGE_OK (from, mode)) return false; return true; } ``` This function checks if the target supports changing the mode class, if the mode is supported by the target, if the mode can be changed to the target mode, and if the mode can be changed from the source mode. If all of these conditions are met, the function returns true, indicating that the mode can be changed. You can use this function in your GCC backend to determine if a mode can be changed to another mode. For example, you can use it to check if a mode can be changed to a wider mode, or if a mode can be changed to a narrower mode. Here is an example of how you can use this function in your GCC backend: ``` if (nvptx_can_change_mode_class (mode, from, to)) { } else { }" GCC,nvptx,166,nvptx_condjump_label (rtx_insn *insn) { if (STRICT) { if (JUMP_P (insn)) { emit_label (insn); } else { } } else { } } GCC,nvptx,167,"void nvptx_cta_sync(int lock, int threads) { asm volatile(""bar.sync %0, %1"" : : ""r""(lock), ""r""(threads)); }" GCC,nvptx,168,"nvptx_declare_object_name (FILE *file, const char *name, const char *decl) { if (name == NULL || decl == NULL) return; fprintf (file, ""\t%s"", name); fprintf (file, ""\t%s"", decl); fprintf (file, ""\t%s"", "".weak""); fprintf (file, ""\t%s"", "".visible""); }" GCC,nvptx,169,uint32_t nvptx_dim_limit(void) { uint32_t max_dim_size = 0; #ifdef __NVPTX__ max_dim_size = 1024; #endif return max_dim_size; } GCC,nvptx,170, GCC,nvptx,171,"nvptx_discover_pre (basic_block bb) { rtx_insn *head_or_tail = BB_HEAD (bb); if (!NVPTX_INSN_P (head_or_tail)) return; basic_block pred_bb = BB_PRED (bb, 0); if (!NVPTX_BLOCK_P (pred_bb)) return; rtx_insn *prehead_or_pretail = NULL; for (rtx_insn *insn = BB_HEAD (pred_bb); insn != NULL; insn = NEXT_INSN (insn)) { if (NVPTX_INSN_P (insn) && NVPTX_INSN_TYPE (insn) == NVPTX_INSN_TYPE_PREHEAD) { prehead_or_pretail = insn; break; } }" GCC,nvptx,172,"void nvptx_dump_pars(int mask, int head, int tail) { printf(""nvptx: mask %d head=%d, tail=%d\n"", mask, head, tail); }" GCC,nvptx,173,"nvptx_emit_forking (rtx_insn *insn, rtx_insn *mask) { rtx_insn *fork_insn; rtx_insn *join_insn; fork_insn = gen_rtx_INSN (CODE_LABEL, NULL_RTX, NULL_RTX); join_insn = gen_rtx_INSN (CODE_LABEL, NULL_RTX, NULL_RTX); emit_insn (fork_insn); emit_insn (join_insn); set_insn_target (mask, fork_insn); set_insn_target (mask, join_insn); }" 
GCC,nvptx,174,"nvptx_emit_joining (rtx_insn *insn, rtx_insn *next_insn) { rtx_insn *join_insn = gen_rtx_INSN (CODE_FOR_nvptx_join, insn->operands[0], insn->operands[1], insn->operands[2]); join_insn->operands[0] = insn->operands[0]; join_insn->operands[1] = insn->operands[1]; join_insn->operands[2] = insn->operands[2]; emit_insn (join_insn); next_insn = join_insn; }" GCC,nvptx,175,"nvptx_encode_section_info (tree decl, tree section_name) { if (TREE_CODE (decl) == VAR_DECL) { if (section_name) { if (TREE_CODE (section_name) == STRING_CST) return section_name; const char *section_name_str = TREE_STRING_POINTER (section_name); if (strcmp (section_name_str, "".shared"") == 0) return build_string ("".shared""); else if (strcmp (section_name_str, "".static"") == 0) return build_string ("".static""); else error (""invalid section name %qs"", section_name_str); } else { return build_string ("".shared""); } } else { return build_string (""""); } }" GCC,nvptx,176, GCC,nvptx,177, GCC,nvptx,178, GCC,nvptx,179,"void nvptx_expand_cmp_swap(uint32_t *ptr, uint32_t old_val, uint32_t new_val) { uint32_t old_val_copy = old_val; uint32_t new_val_copy = new_val; __nvptx_cmp_swap(ptr, old_val_copy, new_val_copy); return old_val_copy; }" GCC,nvptx,180, GCC,nvptx,181,"nvptx_expand_oacc_fork (gimple_stmt_iterator *gsi, gimple *stmt) { tree nvptx_target = get_target_specific_value (stmt, ""nvptx""); if (nvptx_target) { gimple_stmt_iterator fork_gsi = gsi_for_stmt (stmt); gimple *fork_stmt = gsi_stmt (fork_gsi); tree fork_decl = gimple_call_fndecl (fork_stmt); gimple_expand_oacc_fork_join (fork_gsi, fork_stmt, fork_decl, nvptx_target); } }" GCC,nvptx,182,"void nvptx_expand_shared_addr(uint32_t *addr) { *addr = (*addr << 16) | (1 << 15); } ``` This function takes a pointer to a `uint32_t` value as an argument, which represents the shared memory reduction address. The function shifts the value left by 16 bits and sets the least significant bit to 1, which expands the address to the full 32-bit range. You can use this function in your GCC Compiler code to expand shared memory reduction addresses for the nvptx backend. 
For example: ``` #include int main() { uint32_t addr = 0x12345678; nvptx_expand_shared_addr(&addr); return 0; }" GCC,nvptx,183,"nvptx_expand_shuffle (rtx operands[], int ndim, int nvptx, int 0, int 1, int 2) { if (ndim == 1) { if (nvptx == 0) return; if (nvptx == 1) { rtx permute = gen_rtx_PERMUTE (operands[0]->mode, operands[0], operands[1]); emit_insn (permute); return; } if (nvptx == 2) { rtx permute = gen_rtx_PERMUTE (operands[0]->mode, operands[0], operands[1]); emit_insn (permute); rtx shuffle = gen_rtx_SHUFFLE (operands[0]->mode, operands[0], operands[1]); emit_insn (shuffle); return; } }" GCC,nvptx,184,"void nvptx_file_end(void) { fprintf(asm_out_file, "".extern .shared .u%d __nvptx_stacks[32];\n"", 32); fprintf(asm_out_file, "".extern .shared .u32 __nvptx_uni[32];\n""); }" GCC,nvptx,185,"void nvptx_file_start(void) { fprintf(asm_out_file, ""// BEGIN PREAMBLE\n""); fprintf(asm_out_file, ""\t.version\t%d\n"", TARGET_NVPTX_VERSION); fprintf(asm_out_file, ""\t.target\tsm_%d\n"", TARGET_NVPTX_SM); fprintf(asm_out_file, ""\t.address_size %d\n"", TARGET_NVPTX_ADDRESS_SIZE); }" GCC,nvptx,186,"nvptx_find_par (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; int i; if (t == NULL_TREE) return NULL_TREE; if (TREE_CODE (t) == PARM_DECL) { return t; }" GCC,nvptx,187,"nvptx_function_arg (tree decl, tree type, int regno) { if (TREE_CODE (type) == REAL_TYPE) { regno = NVPTX_FPR_REGNO (regno); } else if (TREE_CODE (type) == INTEGER_TYPE) { regno = NVPTX_GPR_REGNO (regno); } else { regno = NVPTX_STACK_REGNO (regno); } DECL_REG (decl) = regno; }" GCC,nvptx,188,"nvptx_function_arg_advance (struct function *fun, struct function_arg_info *arg) { if (arg->mode == VOIDmode) return; if (arg->mode == BLKmode) { arg->size = (arg->size + 7) & ~7; }" GCC,nvptx,189,"nvptx_function_arg_boundary (tree type, unsigned int align) { return MAX (align, 16); }" GCC,nvptx,190,"void nvptx_function_end(FILE *file) { fputs(""}\n"", file); }" GCC,nvptx,191,"nvptx_function_incoming_arg (tree decl, tree arg) { if (POINTER_TYPE_P (TREE_TYPE (arg))) { tree pointer_type = TREE_TYPE (arg); unsigned int pointer_align = TYPE_ALIGN (pointer_type); unsigned int arg_align = TYPE_ALIGN (TREE_TYPE (arg)); if (pointer_align > arg_align) { TREE_TYPE (arg) = build_aligned_type (TREE_TYPE (arg), pointer_align); } }" GCC,nvptx,192, GCC,nvptx,193,"nvptx_function_value (tree type, tree func) { if (TYPE_MODE (type) == BLKmode) return gen_rtx_MEM (BLKmode, gen_rtx_REG (Pmode, 0)); return gen_rtx_REG (TYPE_MODE (type), 0); }" GCC,nvptx,194,nvptx_function_value_regno_p (unsigned int regno) { return regno >= NVPTX_FIRST_RETURN_REGNUM && regno <= NVPTX_LAST_RETURN_REGNUM; } GCC,nvptx,195,"void nvptx_generate_vector_shuffle(uint32_t var, uint32_t shfl, uint32_t dest_var) { uint32_t shfl_down = shfl & 0x1f; uint32_t shfl_up = (shfl >> 5) & 0x1f; uint32_t shfl_down_mask = (shfl_down << 16) | (shfl_down << 24); uint32_t shfl_up_mask = (shfl_up << 16) | (shfl_up << 24); uint32_t dest_var_mask = (dest_var << 16) | (dest_var << 24); asm volatile ( ""shfl.down.b32 %0, %1, %2, %3;"" : ""=r"" (dest_var_mask) : ""r"" (var), ""r"" (shfl_down_mask), ""r"" (shfl_up_mask) ); }" GCC,nvptx,196,"nvptx_gen_pack (rtx dest, rtx src1, rtx src2) { rtx tmp; tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, src1, src2)); emit_insn (gen_rtx_SET (VOIDmode, dest, tmp)); }" GCC,nvptx,197,"void nvptx_gen_shared_bcast(int pm, int rep, int reg) { if (pm == 0) { asm volatile(""mov.u32 %0, %1\n\t"" ""st.shared.b32 [%2], %0\n\t"" : ""=r""(reg) : ""r""(reg), ""r""(rep) : 
""memory""); } else { asm volatile(""ld.shared.b32 %0, [%1]\n\t"" ""mov.u32 %2, %0\n\t"" : ""=r""(reg) : ""r""(rep), ""r""(reg) : ""memory""); } }" GCC,nvptx,198,"nvptx_gen_unpack (rtx dest, rtx src, int mode) { rtx dest_lo = gen_reg_rtx (SImode); rtx dest_hi = gen_reg_rtx (SImode); rtx src_lo = gen_reg_rtx (SImode); rtx src_hi = gen_reg_rtx (SImode); emit_move_insn (src_lo, gen_rtx_MEM (SImode, src)); emit_move_insn (src_hi, gen_rtx_MEM (SImode, src)); emit_insn (gen_rtx_SET (VOIDmode, dest_lo, gen_rtx_ZERO_EXTEND (SImode, src_lo))); emit_insn (gen_rtx_SET (VOIDmode, dest_hi, gen_rtx_ZERO_EXTEND (SImode, src_hi))); emit_move_insn (dest, gen_rtx_MEM (SImode, dest_lo)); emit_move_insn (dest, gen_rtx_MEM (SImode, dest_hi)); }" GCC,nvptx,199,"nvptx_gen_warp_bcast (rtx reg, int n) { if (n == 1) { emit_insn (gen_rtx_SET (VOIDmode, reg, gen_rtx_BROADCAST (VOIDmode, reg))); } else { int i; for (i = 0; i < n; i++) { rtx broadcast = gen_rtx_BROADCAST (VOIDmode, reg); emit_insn (gen_rtx_SET (VOIDmode, reg, broadcast)); } } }" GCC,nvptx,200,"nvptx_get_drap_rtx (void) { return gen_rtx_REG (Pmode, DRAP_REGNUM); }" GCC,nvptx,201,"nvptx_get_shared_red_addr (int offset, int type) { return __builtin_nvptx_shared_red_addr (offset, type); }" GCC,nvptx,202, GCC,nvptx,203,"nvptx_get_unisimt_predicate (void) { return BImode (nvptx_get_unisimt_predicate_reg ()); } ``` This function uses the `nvptx_get_unisimt_predicate_reg` function to get the predicate register for the nvptx backend, and then returns it as a BImode value. Note that the `nvptx_get_unisimt_predicate_reg` function is not defined in the GCC source code, so you will need to define it yourself. Here is an example of how you might define it: ``` static inline int nvptx_get_unisimt_predicate_reg (void) { return 0; }" GCC,nvptx,204,void nvptx_globalize_label(const char *label) { } GCC,nvptx,205,void nvptx_global_lock_addr(void) { if (!nvptx_global_lock_addr) { nvptx_global_lock_addr = (uintptr_t)__reduction_lock; } return nvptx_global_lock_addr; } GCC,nvptx,206, GCC,nvptx,207, GCC,nvptx,208,"static void nvptx_goacc_fork_join(void) { if (__builtin_nvptx_is_fork_join_needed()) { __builtin_nvptx_fork_join(); } } ``` This function uses the `__builtin_nvptx_is_fork_join_needed` intrinsic to determine whether fork and joins are needed. If they are needed, the function performs them using the `__builtin_nvptx_fork_join` intrinsic. You can call this function in your code to determine whether fork and joins are needed for the nvptx backend of GCC Compiler. For example: ``` int main() { nvptx_goacc_fork_join(); }" GCC,nvptx,209,"void nvptx_goacc_reduction(int64_t *dest, int64_t *src, int64_t n) { int64_t i; for (i = 0; i < n; i++) { dest[i] += src[i]; } } ``` This function takes three arguments: * `dest`: A pointer to the destination array where the reduced values will be stored. * `src`: A pointer to the source array that contains the values to be reduced. * `n`: The number of elements in the source array. The function uses a simple loop to iterate over the elements in the source array and add them to the corresponding elements in the destination array. To use this function with the NVPTX backend of GCC, you would need to compile it with the `-march=nvptx` flag to enable the NVPTX instruction set. 
You can then use the function in your code as follows: ``` #include int main() { int64_t dest[10]; int64_t src[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; nvptx_goacc_reduction(dest, src, 10); return 0; }" GCC,nvptx,210,void nvptx_goacc_reduction_fini(void) { int target_specific = 1; __builtin_goacc_set_target_specific(target_specific); __builtin_goacc_reduction_fini(); } GCC,nvptx,211, GCC,nvptx,212,"nvptx_goacc_reduction_setup (void) { nvptx_target_specific_values = { .nvptx_target_specific_value_1 = 2, .nvptx_target_specific_value_2 = 3, .nvptx_target_specific_value_3 = 1, .nvptx_target_specific_value_4 = 5, .nvptx_target_specific_value_5 = 1 }; goacc_reduction_setup_func = nvptx_goacc_reduction_setup; }" GCC,nvptx,213,"void nvptx_goacc_reduction_teardown(uint32_t *target_specific_value) { } ``` In this example, the `nvptx_goacc_reduction_teardown` function takes a single argument, `target_specific_value`, which is a pointer to a `uint32_t` value that contains the target-specific value for the GOACC_REDUCTION_TEARDOWN operation. The function implementation would contain the logic for performing the GOACC_REDUCTION_TEARDOWN operation on the NVPTX backend of GCC. This could involve using the NVPTX intrinsics to perform the necessary operations on the target-specific value. For example, if the target-specific value is a 32-bit integer, you could use the `nvptx_add` intrinsic to perform the addition operation: ``` #include void nvptx_goacc_reduction_teardown(uint32_t *target_specific_value) { uint32_t result = nvptx_add(*target_specific_value, 1); *target_specific_value = result; }" GCC,nvptx,214,"void nvptx_goacc_validate_dims(tree decl, int fn_level, int target_specific_value) { if (decl == NULL) { if (target_specific_value == 0) { decl = build_decl(UNKNOWN_LOCATION, TREE_CODE_DECL, ""nvptx"", 0, 0, 0, 0, 0, 0); } else { decl = build_decl(UNKNOWN_LOCATION, TREE_CODE_DECL, ""nvptx"", target_specific_value, 0, 0, 0, 0, 0); } } if (TREE_CODE(decl) == TREE_CODE_DECL && DECL_NON_ROUTINE_P(decl)) { if (target_specific_value == 0) { decl = build_decl(UNKNOWN_LOCATION, TREE_CODE_DECL, ""nvptx"", 0, 0, 0, 0, 0, 0); }" GCC,nvptx,215, GCC,nvptx,216," int flags, bool *no_add_attrs) { if (TREE_CODE (decl) != VAR_DECL) { error (""%qE attribute only applies to variables"", name); *no_add_attrs = true; return; } if (DECL_STORAGE_CLASS (decl) == SC_AUTO) { error (""%qE attribute not allowed with auto storage class"", name); *no_add_attrs = true; return; } if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE) { error (""%qE attribute not allowed with reference type"", name); *no_add_attrs = true; return; } if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) { error (""%qE attribute not allowed with pointer type"", name); *no_add_attrs = true; return; }" GCC,nvptx,217,"nvptx_hard_regno_nregs (int regno, int *nregs) { if (regno >= FIRST_PSEUDO_REGISTER) { *nregs = 1; return; }" GCC,nvptx,218,"void nvptx_init_axis_predicate(int regno, int name) { } ``` This function takes two arguments: `regno` and `name`. The `regno` argument is the number of the predicate register to initialize, and the `name` argument is the name of the axis to check. Inside the function, you can use the `nvptx` intrinsics to emit the necessary code to initialize the predicate register. 
For example, you can use the `nvptx_mov` intrinsic to move the value of the `tid.x` register into the `regno` predicate register, like this: ``` nvptx_mov(regno, tid.x); ``` You can also use the `nvptx_setp` intrinsic to set the predicate register to indicate whether we are not lane zero on the `name` axis, like this: ``` nvptx_setp(regno, name, 0); ``` This will set the predicate register to 1 if we are not lane zero on the `name` axis, and to 0 otherwise. Here's an example of how you could implement the `nvptx_init_axis_predicate` function using these intrinsics: ``` void nvptx_init_axis_predicate(int regno, int name) { nvptx_mov(regno, tid.x); nvptx_setp(regno, name, 0); }" GCC,nvptx,219,void nvptx_init_builtins(void) { __builtin_nvptx_shuffle = __builtin_nvptx_shuffle_impl; __builtin_nvptx_shufflell = __builtin_nvptx_shufflell_impl; __builtin_nvptx_worker_addr = __builtin_nvptx_worker_addr_impl; __builtin_nvptx_vector_addr = __builtin_nvptx_vector_addr_impl; __builtin_nvptx_cmp_swap = __builtin_nvptx_cmp_swap_impl; __builtin_nvptx_cmp_swapll = __builtin_nvptx_cmp_swapll_impl; __builtin_nvptx_membar_gl = __builtin_nvptx_membar_gl_impl; __builtin_nvptx_membar_cta = __builtin_nvptx_membar_cta_impl; } GCC,nvptx,220,nvptx_init_machine_status (struct machine_function *mach) { mach = XCNEW (struct machine_function); mach->target = TARGET_NVPTX; mach->init_machine_status (); } GCC,nvptx,221,"void nvptx_init_oacc_workers(void) { .reg.u32 %%tidy; .reg.u64 %%t_bcast; .reg.u64 %%y64; mov.u32 %%tidy, %%tid.y; cvt.u64.u32 %%y64, %%tidy; add.u64 %%y64, %%y64, 1; cvta.shared.u64 %%t_bcast, __oacc_bcast; mad.lo.u64 %%r%d, %%y64, %d, %%t_bcast; add.u32 %%r%d, %%tidy, 1; }" GCC,nvptx,222, GCC,nvptx,223,"nvptx_legitimate_address_p (rtx x, machine_mode mode) { if (mode == VOIDmode) return true; if (GET_CODE (x) == REG) return true; if (GET_CODE (x) == MEM) { rtx addr = XEXP (x, 0); if (GET_CODE (addr) == REG) return true; if (GET_CODE (addr) == PLUS) { rtx op0 = XEXP (addr, 0); rtx op1 = XEXP (addr, 1); if (GET_CODE (op0) == REG && GET_CODE (op1) == CONST_INT) return true; } } return false; }" GCC,nvptx,224," rtx *loc) { rtx call_insn; rtx libcall_func; rtx libcall_args; rtx libcall_ret; libcall_func = XEXP (op, 0); libcall_args = XEXP (op, 1); call_insn = emit_call_insn (gen_rtx_CALL (VOIDmode, libcall_func, libcall_args)); libcall_ret = gen_rtx_REG (mode, REGNO (XEXP (call_insn, 0))); *loc = libcall_ret; }" GCC,nvptx,225,"nvptx_libc_has_function (const char *name) { if (strcmp (name, ""__nvptx_libc_has_function"") == 0) return true; return false; }" GCC,nvptx,226,"nvptx_maybe_record_fnsym (rtx sym) { if (GET_CODE (sym) == SYMBOL_REF && SYMBOL_REF_DECL (sym) != NULL) { tree decl = SYMBOL_REF_DECL (sym); if (TREE_CODE (decl) == FUNCTION_DECL) { nvptx_record_fnsym (sym); } } } ``` This function takes a single argument `sym`, which is a `rtx` representing a symbol. It checks if the symbol is a `SYMBOL_REF` and if it refers to an external function. If it does, it records the function symbol as needed using the `nvptx_record_fnsym` function. You can call this function from the `nvptx_expand_function` function, which is responsible for expanding the body of a function. 
You can add a call to `nvptx_maybe_record_fnsym` at the beginning of the `nvptx_expand_function` function, like this: ``` static void nvptx_expand_function (tree fndecl) { nvptx_maybe_record_fnsym (fndecl); }" GCC,nvptx,227,"nvptx_modes_tieable_p (machine_mode mode1, machine_mode mode2) { return mode1 == mode2; }" GCC,nvptx,228,"static const char *nvptx_call_redirect_names[] = { ""__nvptx_call"", ""__nvptx_malloc"", ""__nvptx_free"", ""__nvptx_realloc"", }; static const char *nvptx_call_redirect_targets[] = { ""malloc"", ""free"", ""realloc"", }; static const size_t nvptx_call_redirect_targets_size = sizeof(nvptx_call_redirect_targets) / sizeof(nvptx_call_redirect_targets[0]); static const char *nvptx_name_replacement(const char *name) { for (size_t i = 0; i < nvptx_call_redirect_targets_size; i++) { if (strcmp(name, nvptx_call_redirect_targets[i]) == 0) { return nvptx_call_redirect_names[i]; } } return name; }" GCC,nvptx,229,bool nvptx_needs_shared_bcast(int MASK) { return MASK & (1 << 31); } GCC,nvptx,230,"void nvptx_neuter_pars(tree par, int modes, int outer) { for (int i = 0; i < par->num_children; i++) { tree child = par->children[i]; if (child->type == PARALLEL) { nvptx_neuter_pars(child, modes, outer); } } par->type = PARALLEL; par->num_children = 0; par->children = NULL; par->target_specific_value = nvptx_target_specific_value(modes, outer); }" GCC,nvptx,231, GCC,nvptx,232, GCC,nvptx,233,"void nvptx_output_aligned_decl(tree decl, const char *name, int align) { if (TREE_CODE(decl) == VAR_DECL && DECL_COMMON(decl)) { fprintf(asm_out_file, ""\t.comm %s, %d, %d\n"", name, align, align); } else if (TREE_CODE(decl) == VAR_DECL && DECL_FILE_SCOPE(decl)) { fprintf(asm_out_file, ""\t.file_scope %s, %d, %d\n"", name, align, align); } }" GCC,nvptx,234,"void nvptx_output_ascii(const char *str, size_t size) { (void)FILE; printf(""%.*s"", size, str); }" GCC,nvptx,235,"nvptx_output_barrier (rtx_insn *insn, rtx mem_operand, enum memmodel memmodel) { if (memmodel == MEMMODEL_RELAXED) return; rtx_insn *barrier = gen_rtx_MEMORY_BARRIER (VOIDmode, mem_operand); emit_insn_before (barrier, insn); }" GCC,nvptx,236,"nvptx_output_mov_insn (rtx *operands, int n) { int i; for (i = 0; i < n; i++) { if (GET_CODE (operands[i]) == REG) { fputs (""%.\tmov%t0\t%0, %1;"", asm_out_file); break; } else if (GET_CODE (operands[i]) == MEM) { fputs (""%.\tmov%t0\t%0, %1;"", asm_out_file); break; } else if (GET_CODE (operands[i]) == CONST_INT) { fputs (""%.\tmov.b%T0\t%0, %1;"", asm_out_file); break; } else if (GET_CODE (operands[i]) == CONST_DOUBLE) { fputs (""%.\tmov.u64\t%0.x, %L1;\n\t%.\tmov.u64\t%0.y, %H1;"", asm_out_file); break; } else if (GET_CODE (operands[i]) == CONST_VECTOR) { fputs (""%.\tmov.b%T0\t%0, %1;"", asm_out_file); break; } else if (GET_CODE (operands[i]) == SYMBOL_REF) { fputs (""%.\tmov.b%T0\t%0, %1;"", asm_out_file); break; }" GCC,nvptx,237,"void nvptx_output_return(struct gcc_target *target, struct gcc_insn *insn) { fprintf(target->asm_out_file, ""\tret;\n""); if (insn->operands[0].type == OPERAND_REG) { fprintf(target->asm_out_file, ""\tst.param%s\t[%s_out], %s;\n"", insn->operands[0].reg, insn->operands[0].reg, insn->operands[0].reg); } }" GCC,nvptx,238,"static void nvptx_output_set_softstack (rtx_insn *insn, rtx operands[], int *len) { rtx src_regno = operands[1]; rtx dest_regno = operands[0]; rtx stack_size = operands[2]; if (src_regno == dest_regno) { error (""%Jsoft stack pointer cannot be set to itself""); return; } if (stack_size == NULL_RTX) { error (""%Jsoft stack size must be 
specified""); return; } if (GET_CODE (stack_size) != CONST_INT) { error (""%Jsoft stack size must be a constant""); return; } if (INTVAL (stack_size) < 0) { error (""%Jsoft stack size must be non-negative""); return; } if (INTVAL (stack_size) > 0xffff) { error (""%Jsoft stack size must be less than or equal to 0xffff""); return; } if (GET_CODE (src_regno) != REG) { error (""%Jsoft stack pointer must be a register""); return; } if (GET_CODE (dest_regno) != REG) { error (""%Jsoft stack pointer must be a register""); return; } if (REGNO (src_regno) != REGNO (dest_regno)) { error (""%Jsoft stack pointer must be the same as the destination register""); return; } if (REGNO (src_regno) >= 32) { error (""%Jsoft stack pointer must be a general-purpose register""); return; }" GCC,nvptx,239,"void nvptx_output_simt_enter(int dest, int size, int align) { asm volatile(""mov.u32 %0, %1;"" : ""=r""(dest) : ""r""(size)); asm volatile(""mov.u32 %0, %1;"" : ""=r""(dest) : ""r""(align)); asm volatile(""nvptx_enter_simt_region %0, %1, %2;"" : ""=r""(dest) : ""r""(size), ""r""(align)); }" GCC,nvptx,240," struct omp_simt_enter_insn *omp_simt_enter_insn) { int src = omp_simt_enter_insn->src; gcc_plugin_output->output_asm(""nvptx_exit_simt_region"", src); }" GCC,nvptx,241,"void nvptx_output_skip(size_t size) { asm_out_file->write(NULL, size); }" GCC,nvptx,242,"void nvptx_output_softstack_switch(bool entering, int ptr, int size, int align) { if (entering) { cvta.local.u8 %r1, %simtstack_ar + %r1; sub.u8 %r1, %r1, %r1; and.b8 %r1, %r1, -%r1; st.u8 [%r1 + -%r1], %r1; } else { sub.u8 %r1, %r1, %r1; ld.u8 %r1, [%r1 + -%r1]; } }" GCC,nvptx,243,"void nvptx_output_unisimt_switch (bool entering) { if (entering) { fprintf (asm_out_file, ""\t.reg.u32 %%ustmp2;\n""); fprintf (asm_out_file, ""\tmov.u32 %%ustmp2, %d;\n"", 1); fprintf (asm_out_file, ""\tmov.pred %%r%d, %d;\n"", 0, 1); fprintf (asm_out_file, ""\tst.shared.u32 [%%r%d], %%ustmp2;\n"", 0); fprintf (asm_out_file, ""\tmov.u32 %%ustmp2, %%laneid;\n""); fprintf (asm_out_file, ""\tmov.u32 %%r%d, %s;\n"", 0, ""%ustmp2""); fprintf (asm_out_file, ""\tsetp.eq.u32 %%r%d, %%r%d, %%ustmp2;\n"", 0, 1); }" GCC,nvptx,244, GCC,nvptx,245,"nvptx_pc_set (rtx_insn *insn, int strict) { if (strict) { if (JUMP_P (insn)) { rtx_insn *target = JUMP_LABEL (insn); if (target) { rtx_insn *label = as_a (target); if (label) { nvptx_set_pc (label); return; } } }" GCC,nvptx,246,nvptx_preferred_simd_mode (enum machine_mode mode) { if (mode == VOIDmode) return VOIDmode; if (mode == QImode) return V16QImode; if (mode == HImode) return V8HImode; if (mode == SImode) return V4SImode; if (mode == DImode) return V2DImode; if (mode == TImode) return V1TImode; if (mode == OImode) return V1OImode; if (mode == CImode) return V1CImode; if (mode == XImode) return V1XImode; if (mode == PImode) return V1PImode; if (mode == SImode) return V4SImode; if (mode == DImode) return V2DImode; if (mode == TImode) return V1TImode; if (mode == OImode) return V1OImode; if (mode == CImode) return V1CImode; if (mode == XImode) return V1XImode; if (mode == PImode) return V1PImode; return VOIDmode; } GCC,nvptx,247,"void nvptx_print_address_operand (FILE *file, rtx x) { if (GET_CODE (x) == MEM) { rtx addr = XEXP (x, 0); if (GET_CODE (addr) == REG) { fprintf (file, ""%%%s"", reg_names[REGNO (addr)]); } else if (GET_CODE (addr) == PLUS) { rtx op0 = XEXP (addr, 0); rtx op1 = XEXP (addr, 1); if (GET_CODE (op0) == REG) { fprintf (file, ""%%%s"", reg_names[REGNO (op0)]); } else if (GET_CODE (op0) == CONST_INT) { fprintf (file, ""%d"", 
INTVAL (op0)); } else { fprintf (file, ""?""); } if (GET_CODE (op1) == REG) { fprintf (file, "" + %%%s"", reg_names[REGNO (op1)]); } else if (GET_CODE (op1) == CONST_INT) { fprintf (file, "" + %d"", INTVAL (op1)); } else { fprintf (file, "" + ?""); } } else { fprintf (file, ""?""); } } else { fprintf (file, ""?""); } }" GCC,nvptx,248,"void nvptx_print_operand_address(FILE *file, rtx addr) { if (REG_P(addr)) { fprintf(file, ""%s"", reg_names[REGNO(addr)]); } else { fprintf(file, ""0x%x"", (unsigned int) addr); } }" GCC,nvptx,249, const struct gcc_target_operand_punct *punct) { if (operand->type == NVPTX_OPERAND_TYPE_REGISTER) { if (punct->type == NVPTX_PUNCT_TYPE_REGISTER) { if (punct->reg == operand->reg) return true; } } return false; } GCC,nvptx,250, GCC,nvptx,251, GCC,nvptx,252, GCC,nvptx,253,"void nvptx_record_fndecl(tree decl) { if (TREE_CODE(decl) != FUNCTION_DECL) return; const char *name = IDENTIFIER_POINTER(DECL_NAME(decl)); if (htab_find(fndecl_hash, name) != NULL) return; fndecl_entry *entry = htab_find_slot(fndecl_hash, name, INSERT); if (entry == NULL) return; entry->name = name; entry->prototype = nvptx_prototype(decl); htab_insert(fndecl_hash, entry); }" GCC,nvptx,254,nvptx_record_libfunc (tree callee) { tree libfunc = libfuncs_lookup (callee); if (!libfunc) { libfunc = libfuncs_create (callee); libfuncs_insert (libfunc); } nvptx_emit_declaration (libfunc); } GCC,nvptx,255,"nvptx_record_needed_fndecl (tree decl) { if (!decl->decl_arguments) { decl->decl_arguments = NULL; return; } if (decl->decl_arguments) { nvptx_emit_decl (decl); } } ``` This function takes a `tree` argument named `decl`, which represents the declaration of a function or variable. It checks if the declaration has any argument information, and if it does, it emits a ptx decl for the declaration using the `nvptx_emit_decl` function. If the declaration does not have any argument information, it records the decl for later. You can use this function in the nvptx backend of the GCC Compiler by calling it whenever you need to record or emit a ptx decl for a function or variable. 
For example, you can call it in the `nvptx_finish_decl` function, which is called after a declaration has been processed, like this: ``` void nvptx_finish_decl (tree decl) { nvptx_record_needed_fndecl (decl); nvptx_emit_decl (decl); }" GCC,nvptx,256," const char *target_specific_value) { if (TREE_CODE (decl) == FUNCTION_DECL) { fprintf (mkoffload_file, ""//:FUNC_MAP \""%s\"""", name); fprintf (mkoffload_file, ""//:VAR_MAP \""%s\""\n"", target_specific_value); } else if (TREE_CODE (decl) == VAR_DECL) { fprintf (mkoffload_file, ""//:VAR_MAP \""%s\"""", name); fprintf (mkoffload_file, ""//:VAR_MAP \""%s\""\n"", target_specific_value); } else { return; } fprintf (mkoffload_file, ""\n""); }" GCC,nvptx,257,"static inline uint64_t nvptx_reduction_update(uint64_t *ptr, uint64_t var, uint64_t op) { uint64_t old_value = *ptr; uint64_t new_value = var; switch (op) { case NVPTX_REDUCTION_OP_ADD: new_value = old_value + var; break; case NVPTX_REDUCTION_OP_SUB: new_value = old_value - var; break; case NVPTX_REDUCTION_OP_MUL: new_value = old_value * var; break; case NVPTX_REDUCTION_OP_DIV: new_value = old_value / var; break; case NVPTX_REDUCTION_OP_MOD: new_value = old_value % var; break; case NVPTX_REDUCTION_OP_AND: new_value = old_value & var; break; case NVPTX_REDUCTION_OP_OR: new_value = old_value | var; break; case NVPTX_REDUCTION_OP_XOR: new_value = old_value ^ var; break; default: break; } *ptr = new_value; return new_value; }" GCC,nvptx,258,"void nvptx_reorg(void) { Instruction *insn = get_current_insn(); if (insn->has_subreg_operands()) { Operand *subreg_operands = insn->get_subreg_operands(); for (int i = 0; i < subreg_operands->size(); i++) { Operand *subreg_operand = subreg_operands->get(i); if (subreg_operand->is_register()) { Operand *reg_operand = subreg_operand->get_register(); if (reg_operand->is_subreg()) { Operand *subreg_operand = reg_operand->get_subreg(); Operand *new_reg_operand = new_reg_operand(subreg_operand->get_type(), subreg_operand->get_name()); insn->replace_operand(subreg_operand, new_reg_operand); insn->add_copyin_instruction(new_reg_operand, subreg_operand); insn->add_copyout_instruction(subreg_operand, new_reg_operand); } } } } }" GCC,nvptx,259,"void nvptx_reorg_subreg(void) { Instruction *insn = get_insn(); if (insn->has_subreg_operand()) { Operand *subreg_op = insn->get_subreg_operand(); int subreg_size = subreg_op->get_size(); Register *new_reg = new_register(subreg_size); insn->replace_operand(subreg_op, new_reg); Instruction *copyin_insn = new_instruction(INSN_COPYIN, new_reg, subreg_op); Instruction *copyout_insn = new_instruction(INSN_COPYOUT, subreg_op, new_reg); insn->add_instruction(copyin_insn); insn->add_instruction(copyout_insn); } }" GCC,nvptx,260,"void nvptx_reorg_uniform_simt(void) { if (nvptx_target_specific_value(nvptx, 1, 0, 0, 0)) { if (nvptx_target_specific_value(nvptx, 1, 0, 0, 1)) { nvptx_shuffle_propagation(); } } }" GCC,nvptx,261,"const char *nvptx_replace_dot(const char *name) { if (name == NULL || strchr(name, '.') == NULL) return NULL; char *new_name = strdup(name); char *p = new_name; while (*p != '\0') { if (*p == '.') *p = '$'; p++; } return new_name; }" GCC,nvptx,262, GCC,nvptx,263,"static void nvptx_sese_color(struct nvptx_region *region, uint32_t coloring) { struct nvptx_block *block; struct nvptx_block *successor; struct nvptx_block *entry; struct nvptx_block *exit; for (block = region->blocks; block != NULL; block = block->next) { block->color = coloring; for (successor = block->successors; successor != NULL; successor = 
successor->next) { if (successor->region != NULL) { entry = successor->region->entry; exit = successor->region->exit; if (entry != NULL && exit != NULL) { entry->color = coloring; exit->color = coloring; } } } } }" GCC,nvptx,264,"void nvptx_sese_number(struct nvptx_basic_block *bb, struct nvptx_basic_block *parent, int orientation) { if (bb->visited) { return; } bb->visited = true; if (parent) { nvptx_sese_number(parent, bb, orientation); } bb->number = 3 * bb->number + orientation; if (bb->children) { nvptx_sese_number(bb->children, bb, orientation); } }" GCC,nvptx,265,"nvptx_shared_propagate (rtx_insn *insn, int pre_p, int is_call) { if (pre_p) { nvptx_spill_live_state (insn, 1); } else { nvptx_fill_live_state (insn, 1); } nvptx_insert_sequence (insn, 1); return nvptx_propagate (insn, 1, is_call); }" GCC,nvptx,266, GCC,nvptx,267,"target_specific nvptx_skip_par(mask, forked, joining) { if (mask.empty()) return nvptx,1,0,1; if (forked < 0 || joining < 0 || forked >= joining) return nvptx,1,0,1; for (int i = forked; i < joining; i++) { if (mask[i]) continue; return nvptx,1,0,1; } return nvptx,1,0,1; }" GCC,nvptx,268,"nvptx_split_blocks (void) { basic_block bb = get_current_block (); basic_block new_bb = split_block (bb, get_current_insn ()); update_cfg (bb, new_bb); update_dominance (bb, new_bb); update_post_dominance (bb, new_bb); update_loop_tree (bb, new_bb); update_ssa (bb, new_bb); }" GCC,nvptx,269,nvptx_strict_argument_naming (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE && TYPE_STDARG (TREE_TYPE (decl))) { int num_named_args = 0; tree arg_type; for (arg_type = TYPE_ARG_TYPES (TREE_TYPE (decl)); arg_type; arg_type = TREE_CHAIN (arg_type)) { if (TREE_CODE (TREE_VALUE (arg_type)) == PARM_DECL) num_named_args++; } GCC,nvptx,270,"nvptx_truly_noop_truncation (rtx x, enum machine_mode mode) { if (mode == TImode) { x = convert_modes (TImode, SImode, x, 0); } else if (mode == DImode) { x = convert_modes (DImode, SImode, x, 0); } else { return; } SET_RTX_EXPR (x, mode); }" GCC,nvptx,271,"nvptx_unisimt_handle_set (rtx_insn *insn, rtx_insn *set) { rtx_insn *shuffle_insn; rtx_insn *shuffle_operands[2]; int master_lane; int current_lane; master_lane = get_master_lane (insn); current_lane = get_current_lane (insn); shuffle_insn = create_shuffle_insn (insn, set, master_lane, current_lane); emit_insn (shuffle_insn); }" GCC,nvptx,272,nvptx_use_anchors_for_symbol_p (tree decl) { if (!TARGET_ANCHORS) return false; if (TREE_CODE (decl) == FUNCTION_DECL) return false; if (TREE_CODE (decl) == VAR_DECL) return TARGET_ANCHORS; return false; } GCC,nvptx,273,static inline uintptr_t nvptx_vector_alignment(uintptr_t addr) { uintptr_t alignment = BIGGEST_ALIGNMENT; if (addr % alignment != 0) { alignment = addr % alignment; } return alignment; } GCC,nvptx,274,"nvptx_warp_propagate (rtx_insn *insn, rtx_insn *block) { rtx_insn *prev_insn = insn; rtx_insn *next_insn = insn->next; while (next_insn && next_insn->type == INSN_NVPTX_WARP_PROPAGATE) { nvptx_propagate (prev_insn, next_insn); prev_insn = next_insn; next_insn = next_insn->next; } }" GCC,nvptx,275,"void output_init_frag(struct frag *frag, const char *sym, int addend) { if (frag->fr_type == FR_INIT) { if (frag->fr_offset + frag->fr_size >= frag->fr_max_size) { fwrite(frag->fr_data, frag->fr_size, 1, frag->fr_file); frag->fr_offset = 0; frag->fr_size = 0; } } if (sym != NULL) { fprintf(frag->fr_file, ""%s"", sym); if (addend != 0) { fprintf(frag->fr_file, "" + %d"", addend); } } }" GCC,nvptx,276,"void 
output_reg(int reg, int subreg, int pair) { if (subreg == 0) { printf(""%%r%d"", reg); } else { printf(""%%r%d"", reg); if (pair == 1) { printf("",""); } printf(""%%r%d"", subreg); } if (pair == 1) { printf(""}""); } } ``` This function takes three arguments: `reg`, `subreg`, and `pair`. It outputs a register, subregister, or register pair (with optional enclosing braces) based on the values of these arguments. Here's an example of how you can use this function: ``` int main() { output_reg(1, 2, 0); output_reg(2, 3, 1); output_reg(3, 4, 1); output_reg(4, 5, 0); return 0; }" GCC,nvptx,277,"void parallel(void *parent, void *child) { child->parent = parent; child->next = NULL; child->prev = NULL; child->target_specific = 0x00000000; }" GCC,nvptx,278,"static inline bool pass_in_memory(int mode, int type) { if (mode == TARGET_SPECIFIC_VALUE) { if (type == TARGET_SPECIFIC_VALUE) { return false; } else { return true; } } else { return false; } }" GCC,nvptx,279,"bool predicate_insn(rtx insn, enum rtx_code cond, bool doit) { if (doit) { if (cond == COND_CODE_NONE) { insn->predicate = COND_CODE_NONE; } else { insn->predicate = cond; } } return insn->predicate == cond; }" GCC,nvptx,280,"void prevent_branch_around_nothing(void) { asm(""nop""); }" GCC,nvptx,281, GCC,nvptx,282,static inline void promote_return(uint64_t mode) { if (mode == NVPTX_TARGET_SPECIFIC_VALUE) { return; } switch (mode) { case 1: return 2; case 2: return 4; case 4: return 8; default: return 16; } } GCC,nvptx,283,"void push(int RN) { asm volatile(""Pushing backedge %d:%+d\n"", 0, RN); }" GCC,nvptx,284,"void remove(int n) { if (n == 0) { return; } int i = 0; while (i < n) { if (i % 2 == 0) { printf(""Removing backedge %d:%+d\n"", i, i + 1); } i++; } }" GCC,nvptx,285, GCC,nvptx,286,"section_for_sym (tree sym) { if (sym->nvptx_section) return sym->nvptx_section; if (sym->nvptx_section_info) { switch (sym->nvptx_section_info) { case NVPTX_SECTION_GLOBAL: return "".global""; case NVPTX_SECTION_SHARED: return "".shared""; case NVPTX_SECTION_LOCAL: return "".local""; case NVPTX_SECTION_CONST: return "".const""; case NVPTX_SECTION_PARAM: return "".param""; default: return NULL; } } return NULL; }" GCC,nvptx,287,"void shared_prop_gen(uint32_t *dest, uint32_t *src, uint32_t n) { uint32_t i; for (i = 0; i < n; i++) { dest[i] = src[i]; } }" GCC,nvptx,288,split_mode_p (machine_mode mode) { if (mode == VOIDmode) return false; if (mode == BLKmode) return true; if (mode == V4SImode || mode == V4SFmode) return true; if (mode == V8SImode || mode == V8SFmode) return true; if (mode == V16SImode || mode == V16SFmode) return true; if (mode == V32SImode || mode == V32SFmode) return true; return false; } GCC,nvptx,289,"static void verify_neutering_jumps(tree from_clause) { tree vector_jump, worker_jump; vector_jump = find_jump_in_clause(from_clause, ""VECTOR_""); worker_jump = find_jump_in_clause(from_clause, ""WORKER_""); if (vector_jump && worker_jump) { if (vector_jump->location > worker_jump->location) { error(""VECTOR_ jump must be before WORKER_ jump in FROM clause""); } } }" GCC,nvptx,290,void verify_neutering_labels(TO to) { if (to.contains(VECTOR_LABEL) && to.contains(WORKER_LABEL)) { int vector_label_pos = to.indexOf(VECTOR_LABEL); int worker_label_pos = to.indexOf(WORKER_LABEL); if (vector_label_pos < worker_label_pos) { to.remove(WORKER_LABEL); } else { to.remove(VECTOR_LABEL); } } } GCC,nvptx,291,"void warp_prop_gen(void) { asm volatile(""nvptx,0"" : : : ""memory""); }" GCC,nvptx,292,"void workaround_barsyncs(void) { asm 
volatile(""membar.cta"" ::: ""memory""); asm volatile(""bar.sync"" ::: ""memory""); asm volatile(""membar.cta"" ::: ""memory""); }" GCC,nvptx,293,"void workaround_uninit_method_1(void) { for (int i = 0; i < 10; i++) { printf(""Default init of reg %u inserted: insn %u\n"", i, i); } }" GCC,nvptx,294, GCC,nvptx,295,"typedef struct { uint32_t reg_num; uint32_t reg_value; } nvptx_reg_t; typedef struct { uint32_t edge_num; uint32_t edge_value; } nvptx_edge_t; typedef struct { uint32_t reg_num; uint32_t reg_value; uint32_t edge_num; uint32_t edge_value; } nvptx_reg_edge_t; void workaround_uninit_method_3(nvptx_reg_t *regs, nvptx_edge_t *edges, uint32_t num_regs, uint32_t num_edges) { uint32_t i, j; nvptx_reg_edge_t reg_edge; for (i = 0; i < num_regs; i++) { for (j = 0; j < num_edges; j++) { if (edges[j].edge_value == regs[i].reg_value) { reg_edge.reg_num = i; reg_edge.edge_num = j; reg_edge.edge_value = edges[j].edge_value; break; } } }" GCC,nvptx,296,"void write_arg_mode(int mode, int argno, int for_reg) { if (for_reg < 0) { printf("".param %s"", get_mode_name(mode)); if (argno > 0) { printf("", %s"", get_mode_name(argno)); } printf("";\n""); } else if (for_reg == 0) { printf(""\t.reg %s"", get_mode_name(mode)); if (argno > 0) { printf("", %s"", get_mode_name(argno)); } printf("";\n""); } else { printf(""\tld.param %s, [%s];\n"", get_mode_name(mode), get_mode_name(argno)); } }" GCC,nvptx,297,"int write_arg_type(tree type, int s, int for_reg, int argno, bool prototed) { if (TREE_CODE(type) == POINTER_TYPE) { write_arg_mode(type, s, for_reg, argno, prototed); write_arg_mode(TREE_TYPE(type), s, for_reg, argno + 1, prototed); return argno + 2; } write_arg_mode(type, s, for_reg, argno, prototed); return argno + 1; }" GCC,nvptx,298,"bool write_as_kernel(const char *attrs) { if (strstr(attrs, ""nvptx"") != NULL) { if (strstr(attrs, ""kernel"") != NULL || strstr(attrs, ""omp target entrypoint"") != NULL || strstr(attrs, ""oacc function"") != NULL) { return true; } } return false; }" GCC,nvptx,299,"void write_fn_marker(const char *name, const char *type, const char *target) { if (type == ""decl"") { fprintf(stderr, ""// BEGIN %s %s\n"", target, name); } else if (type == ""defn"") { fprintf(stderr, ""// BEGIN %s %s\n"", target, name); } else { fprintf(stderr, ""// BEGIN %s %s\n"", target, name); } }" GCC,nvptx,300,"void write_fn_proto(FILE *s, tree decl, const char *name) { if (decl->decl_kind == DECL_KERNEL) { fprintf(s, "".kernel %s\n"", name); } else if (decl->decl_kind == DECL_FUNCTION) { fprintf(s, "".func %s\n"", name); } fprintf(s, ""// %s\n"", name); }" GCC,nvptx,301, GCC,nvptx,302, GCC,nvptx,303,"void write_omp_entry(void *arg, char *stack, size_t stacksize) { __nvptx_stacks[tid.y] = stack + stacksize * (ctaid.x * ntid.y + tid.y + 1); __nvptx_uni[tid.y] = 0; gomp_nvptx_main(ORIG, arg); }" GCC,nvptx,304, GCC,nvptx,305,"bool write_return_type(tree type, bool is_prototype) { if (TREE_CODE(type) == VOID_TYPE) { return false; } if (TREE_CODE(type) == POINTER_TYPE) { if (TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) { return false; } if (is_prototype) { fprintf(asm_out_file, "" %s"", ""void""); } else { fprintf(asm_out_file, "" %s"", ""void*""); } return true; } if (TREE_CODE(type) == INTEGER_TYPE) { if (is_prototype) { fprintf(asm_out_file, "" %s"", ""int""); } else { fprintf(asm_out_file, "" %s"", ""int*""); } return true; } if (TREE_CODE(type) == REAL_TYPE) { if (is_prototype) { fprintf(asm_out_file, "" %s"", ""float""); } else { fprintf(asm_out_file, "" %s"", ""float*""); } return true; } if 
(TREE_CODE(type) == COMPLEX_TYPE) { if (is_prototype) { fprintf(asm_out_file, "" %s"", ""complex""); } else { fprintf(asm_out_file, "" %s"", ""complex*""); } return true; }" GCC,nvptx,306,"void write_shared_buffer(int alignment, const char* name, int size) { printf("".shared .align %d .u8 %s[%d];\n"", alignment, name, size); }" GCC,nvptx,307,"void write_var_marker(const char *name, const char *type) { if (type == ""GLOBAL"") { fprintf(stderr, ""// BEGIN%s VAR %s: GLOBAL\n"", name, type); } else if (type == ""DEF"") { fprintf(stderr, ""// BEGIN%s VAR %s: DEF\n"", name, type); } else if (type == ""DECL"") { fprintf(stderr, ""// BEGIN%s VAR %s: DECL\n"", name, type); } else { fprintf(stderr, ""// BEGIN%s VAR %s: %s\n"", name, type); } }" GCC,riscv,0,"void riscv_atomic_assign_expand_fenv(fenv_t *env, int reg, int value) { riscv_atomic_assign_expand(env, reg, value); }" GCC,riscv,1, GCC,riscv,2,"riscv_expand_builtin (tree exp, rtx target) { if (TREE_CODE (exp) == CALL_EXPR && TREE_CODE (TREE_OPERAND (exp, 0)) == IDENTIFIER_NODE && TREE_OPERAND (exp, 0) == riscv_vector_builtin_identifier) { tree vec_op = TREE_OPERAND (exp, 1); tree op1 = TREE_OPERAND (vec_op, 0); tree op2 = TREE_OPERAND (vec_op, 1); rtx result = riscv_expand_vector_operation (op1, op2, target); return result; } return default_expand_builtin (exp, target); }" GCC,riscv,3," tree arglist) { if (fndecl == riscv_builtin_decl (RISCV_BUILTIN_VEC_ADD)) { tree arg0 = gimple_call_arg (gsi, 0); tree arg1 = gimple_call_arg (gsi, 1); tree type = TREE_TYPE (arg0); if (TREE_CODE (type) == VECTOR_TYPE && TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE && TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE) { tree res = build_int_cst (type, 0); tree len = TYPE_VECTOR_SUBPARTS (type); for (int i = 0; i < len; i++) { tree elt0 = gimple_call_arg (gsi, i); tree elt1 = gimple_call_arg (gsi, i + len); tree sum = build2 (PLUS_EXPR, TREE_TYPE (elt0), elt0, elt1); res = build2 (COMPOUND_EXPR, type, res, sum); } return res; } } return riscv_vector::gimple_fold_builtin (gsi, fndecl, arglist); }" GCC,riscv,4,void riscv_init_builtins(void) { riscv_vector::init_builtins(); TARGET_INIT_BUILTINS = riscv; } GCC,riscv,5, GCC,riscv,6,"void riscv_atomic_assign_expand_fenv(fenv_t *env, int reg, int value) { riscv_atomic_assign_expand(env, reg, value); }" GCC,riscv,7,riscv_build_function_type (tree type) { enum riscv_function_type result = RISCV_FUNCTION_TYPE_NORMAL; if (TREE_CODE (type) == FUNCTION_TYPE && TYPE_RETURN_MODE (type) == RISCV_RETURN_MODE_ABI_COMPLIANT) { if (TREE_CODE (TREE_TYPE (type)) == RISCV_RETURN_TYPE_ABI_COMPLIANT) result = RISCV_FUNCTION_TYPE_ABI_COMPLIANT; } return result; } GCC,riscv,8,"void riscv_builtin_decl(tree decl) { if (TREE_CODE(decl) == FUNCTION_DECL && DECL_BUILT_IN(decl)) { const char *name = IDENTIFIER_POINTER(DECL_NAME(decl)); if (strcmp(name, ""__builtin_riscv_"") == 0) { riscv_builtin_handle(decl); } } } void riscv_builtin_handle(tree decl) { }" GCC,riscv,9, GCC,riscv,10," tree target) { tree fn = CALL_EXPR_FN (exp); tree args = CALL_EXPR_ARGS (exp); tree result; if (has_target_p) { result = riscv_expand_builtin_direct_target (fn, args, target); } else { result = riscv_expand_builtin_direct_default (fn, args); } if (TREE_CODE (result) != ERROR_MARK) return result; return exp; }" GCC,riscv,11, GCC,riscv,12,void riscv_init_builtins(void) { riscv_builtins_init(); riscv_target_specific_value = riscv_0; } GCC,riscv,13,"void riscv_prepare_builtin_arg(int argno, tree exp, expand_operand *op) { if (TREE_CODE(exp) == REGISTER_DECL) { op->type = 
EXPAND_OPERAND_REGISTER; op->reg = TREE_REGISTER(exp); return; } if (TREE_CODE(exp) == INTEGER_CST) { op->type = EXPAND_OPERAND_CONSTANT; op->constant = TREE_INT_CST_LOW(exp); return; } if (TREE_CODE(exp) == SYMBOL_REF) { op->type = EXPAND_OPERAND_REGISTER; op->reg = riscv_load_symbol(exp); return; } error(""unsupported argument type in riscv_prepare_builtin_arg""); }" GCC,riscv,14,"riscv_check_builtin_call (tree exp, tree target) { if (TREE_CODE (exp) != CALL_EXPR) return false; tree callee = CALL_EXPR_FN (exp); if (TREE_CODE (callee) != FUNCTION_DECL) return false; tree builtin = DECL_BUILT_IN_CLASS (callee); if (builtin == RISCV_BUILTIN_VECTOR) { tree vector_type = TREE_TYPE (TREE_TYPE (callee)); if (vector_type == NULL_TREE) return false; tree arg = CALL_EXPR_ARG (exp, 0); if (TREE_CODE (arg) != VECTOR_TYPE) return false; tree element_type = TREE_TYPE (arg); if (element_type == NULL_TREE) return false; if (TREE_CODE (element_type) != INTEGER_TYPE && TREE_CODE (element_type) != FLOAT_TYPE) return false; return true; } return false; }" GCC,riscv,15, GCC,riscv,16,"static void riscv_pragma_intrinsic(const char *option) { if (strcmp(option, ""vector"") == 0) { riscv_vector::handle_pragma_vector(); } else if (strcmp(option, ""enabled"") == 0) { } else { error(""unknown #pragma riscv intrinsic option %qs"", option); } }" GCC,riscv,17, GCC,riscv,18,"static void riscv_cpu_cpp_builtins(void) { TARGET_CPU_CPP_BUILTINS(riscv, ""__riscv"", ""__riscv_compressed"", ""__riscv_atomic"", ""__riscv_mul"", ""__riscv_div"", ""__riscv_muldiv"", ""__riscv_xlen"", 8, ""__riscv_flen"", 8, ""__riscv_fdiv"", ""__riscv_fsqrt"", ""__riscv_float_abi_soft"", ""__riscv_float_abi_single"", ""__riscv_float_abi_double"", ""__riscv_cmodel_medlow"", ""__riscv_cmodel_medany"", ""__riscv_cmodel_pic""); }" GCC,riscv,19, GCC,riscv,20,"riscv_d_handle_target_float_abi (void *gcc_target_info) { struct gcc_target_info *target_info = (struct gcc_target_info *)gcc_target_info; target_info->float_abi = ""riscv""; target_info->soft_float = ""soft""; target_info->single_float = ""single""; target_info->double_float = ""double""; target_info->float_abi_version = 1; } ``` This function takes a `void *` pointer to a `gcc_target_info` structure as its argument, and it sets the `float_abi`, `soft_float`, `single_float`, `double_float`, and `float_abi_version` fields of the structure to the appropriate values for the RISC-V target. You can then call this function from the `riscv_d_handle_target` function, which is responsible for handling the ` __traits ( getTargetInfo , `` floatAbi '' ) ' call. ``` static void riscv_d_handle_target (void *gcc_target_info) { riscv_d_handle_target_float_abi (gcc_target_info); ... }" GCC,riscv,21,"riscv_d_register_target_info (void) { static const struct riscv_target_info riscv_target_info = { .float_abi = ""floatAbi"", }; riscv_target_info.riscv_d_register_target_info = &riscv_target_info; } ``` This function is called by the GCC backend to register the target-specific information for the RISC-V architecture. It sets the `float_abi` field of the `riscv_target_info` structure to the string ""floatAbi"", which indicates that the target supports the floating-point ABI. You can then use this function in your GCC backend to register the target-specific information for the RISC-V architecture. 
For example, you could call it in the `riscv_init_target` function, like this: ``` void riscv_init_target (void) { riscv_d_register_target_info (); }" GCC,riscv,22,"static void riscv_d_target_versions(void) { const char *riscv_d_target_versions[] = { ""RISCV64"", ""RISCV32"", ""D_HardFloat"", ""D_SoftFloat"" }; riscv_d_target_versions[0] = ""RISCV64""; riscv_d_target_versions[1] = ""RISCV32""; riscv_d_target_versions[2] = ""D_HardFloat""; riscv_d_target_versions[3] = ""D_SoftFloat""; }" GCC,riscv,23,"uint32_t calculate_x_in_sequence(uint32_t x, uint32_t y) { uint32_t result = 0; result = (x << 1) + y; if (result >= 0x100000000) { result -= 0x100000000; } return result; }" GCC,riscv,24,void riscv_run_selftests(void) { riscv_selftest_memory_access(); riscv_selftest_branch_prediction(); riscv_selftest_register_access(); riscv_selftest_instruction_decode(); riscv_selftest_instruction_execute(); } GCC,riscv,25,"void analyze(rtx_insn *insn) { int i; int regno; int count = 0; for (i = 0; i < XVECLEN(insn, 0); i++) { rtx_insn *x = XVECEXP(insn, 0, i); if (GET_CODE(x) == MEM) { regno = REGNO(XEXP(x, 0)); if (regno >= 0 && regno < 32) { count++; } } } if (count > 0) { fprintf(stderr, ""Counted %d times\n"", count); } }" GCC,riscv,26,void execute(void) { const int riscv = 0; initialize(); perform_pass(); cleanup(); } GCC,riscv,27,"transform (rtx_insn *insn) { rtx base_reg = XEXP (insn, 0); rtx offset = XEXP (insn, 1); rtx new_base_reg = NULL_RTX; rtx new_offset = NULL_RTX; if (REG_N_REFS (base_reg) < 4) return; if (GET_CODE (offset) != CONST_INT || INTVAL (offset) < 0 || INTVAL (offset) >= 0x10000) return; new_base_reg = gen_rtx_REG (SImode, REGNO (base_reg)); new_offset = gen_rtx_CONST_INT (SImode, INTVAL (offset) & 0xffff); XEXP (insn, 0) = new_base_reg; XEXP (insn, 1) = new_offset; }" GCC,riscv,28,"check_for_no_return_call (rtx_insn *prologue) { if (prologue->next->code == CALL_INSN && CALL_INSN_FUNCTION_USAGE (prologue->next) == FUNCTION_USAGE_NORETURN) { remove_insn (prologue); return; } return; } ``` This function takes a single argument, `prologue`, which is the instruction that matches the prologue pattern. It checks if the next instruction after the prologue is a call to a no-return function. If it is, it removes the prologue instruction. You can use this function in the `riscv_remove_unneeded_save_restore_calls` function to check if the prologue is followed by a call to a no-return function. If it is, you can remove the prologue and the call to the no-return function, which should simplify the code and make it easier to optimize. 
Here is an example of how you could use this function in the `riscv_remove_unneeded_save_restore_calls` function: ``` static void riscv_remove_unneeded_save_restore_calls (rtx_insn *insn) { if (insn->code == CALL_INSN && CALL_INSN_FUNCTION_USAGE (insn) == FUNCTION_USAGE_PROLOGUE) { check_for_no_return_call (insn); }" GCC,riscv,29,static void riscv_sr_match_epilogue(rtx_insn *insn) { rtx_insn *epilogue_insn; rtx_insn *prev_insn; rtx_insn *next_insn; epilogue_insn = insn; while (epilogue_insn && !RTX_INSN_EPILOGUE_P(epilogue_insn)) epilogue_insn = PREV_INSN(epilogue_insn); if (!epilogue_insn) return NULL; prev_insn = PREV_INSN(epilogue_insn); next_insn = NEXT_INSN(epilogue_insn); if (prev_insn && RTX_INSN_TYPE(prev_insn) == RTX_INSN_TYPE_RETURN) { return epilogue_insn; } else if (next_insn && RTX_INSN_TYPE(next_insn) == RTX_INSN_TYPE_RETURN) { return epilogue_insn; } else { return NULL; } } GCC,riscv,30,"riscv_sr_match_prologue (rtx_insn *body, rtx_insn *insn) { rtx_insn *note_insn_basic_block = NULL; rtx_insn *note_insn_prologue_end = NULL; rtx_insn *gpr_save_insn = NULL; for (rtx_insn *insn = body; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK) { note_insn_basic_block = insn; break; } } for (rtx_insn *insn = note_insn_basic_block; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END) { note_insn_prologue_end = insn; break; } }" GCC,riscv,31,"apply_mask_policy_p (rtx op, rtx mask) { if (mask) { op = gen_rtx_fmt_ee (GET_CODE (op), VOIDmode, op, mask); } emit_insn (op); }" GCC,riscv,32,"apply_tail_policy_p (rtx_insn *insn) { if (GET_CODE (insn) == CALL_INSN) return true; if (GET_CODE (insn) == JUMP_INSN) return true; if (GET_CODE (insn) == RETURN_INSN) return true; return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the RISC-V instruction set. It checks the instruction's opcode to determine if it is a call, jump, or return instruction, and if so, returns `true` to indicate that the tail policy operand should be applied. Otherwise, it returns `false`. You can use this function in the RISC-V backend of GCC by calling it in the appropriate places, such as in the `riscv_expand_insn` function, which is responsible for expanding instructions into RISC-V machine code. ```c #include ""config.h"" #include ""system.h"" #include ""riscv.h"" static void riscv_expand_insn (rtx_insn *insn) { if (apply_tail_policy_p (insn)) { ... } else { ... } }" GCC,riscv,33,"static inline void apply_vl_p(uint32_t vl) { riscv_set_vl(vl); } ``` This function takes a single argument `vl`, which is the value of the `vl` operand to be applied. It uses the `riscv_set_vl` function to set the `vl` operand to the specified value. You can use this function in your RISC-V backend code to apply the `vl` operand by default. 
For example: ```c #include #include int main() { apply_vl_p(16); return 0; }" GCC,riscv,34," int *n_variants, int *n_duplicates) { int i; int n_operands_ = 0; int n_alternatives_ = 0; int n_variants_ = 0; int n_duplicates_ = 0; for (i = 0; i < XVECLEN (insn, 0); i++) { rtx_insn *operand = XVECEXP (insn, 0, i); if (operand == NULL) continue; if (GET_CODE (operand) == OPERAND) { n_operands_++; if (GET_CODE (XEXP (operand, 0)) == OPERAND) n_alternatives_++; } else if (GET_CODE (operand) == VARIANT) { n_variants_++; if (GET_CODE (XEXP (operand, 0)) == OPERAND) n_alternatives_++; }" GCC,riscv,35,"can_be_overloaded_p (void) { return true; } ``` This function simply returns the value `true` by default, indicating that most intrinsics can be overloaded. You can use this function in the RISC-V backend of GCC by defining it in a header file that is included in the backend's source code. For example, you could define it in a file called `riscv-intrinsics.h`: ``` #ifndef RISCV_INTRINSICS_H #define RISCV_INTRINSICS_H static bool can_be_overloaded_p (void) { return true; } #endif /* RISCV_INTRINSICS_H */ ``` Then, in the backend's source code, you can include this header file and use the `can_be_overloaded_p` function as needed: ``` #include ""riscv-intrinsics.h"" ... if (can_be_overloaded_p()) { } else { }" GCC,riscv,36,"expand (rtx call) { rtx lhs = XEXP (call, 0); rtx rhs = XEXP (call, 1); return lhs; }" GCC,riscv,37,"fold (gimple_stmt_iterator *gsi, tree arg1, tree arg2) { tree new_vl = NULL_TREE; tree new_stmt = NULL_TREE; if (gimple_call_builtin_p (arg1, BUILT_IN_RISCV_1_0_2_2)) { new_vl = gimple_call_arg (arg1, 0); if (new_vl && TREE_CODE (new_vl) == REGISTER_DECL && REGISTER_DECL_REGISTER_CLASS (new_vl) == RISCV_REGISTER_CLASS_VL) { new_stmt = gimple_build_assign (new_vl, arg2); gimple_replace_stmt (gsi, new_stmt); return new_stmt; } } return NULL_TREE; }" GCC,riscv,38,has_merge_operand_p (rtx_insn *insn) { return true; } GCC,riscv,39,bool use_mask_predication_p(void) { return true; } GCC,riscv,40,"bool build(insn_uid_t insn_uid) { chain_t chain = {insn_uid, NULL}; while (true) { insn_uid_t next_insn_uid = chain.insn_uid; if (is_use(next_insn_uid)) { chain.uses.push_back(next_insn_uid); } else if (is_definition(next_insn_uid)) { chain.definitions.push_back(next_insn_uid); } else { return false; } if (chain.uses.empty() && chain.definitions.empty()) { return true; } next_insn_uid = chain.uses.back(); if (is_use(next_insn_uid)) { chain.uses.push_back(next_insn_uid); } else if (is_definition(next_insn_uid)) { chain.definitions.push_back(next_insn_uid); } else { return false; } }" GCC,riscv,41,"build_all (void) { tree group = get_group (); tree operand_suffixes = get_operand_suffixes (); tree type_suffixes = get_type_suffixes (); tree predication_suffixes = get_predication_suffixes (); for (tree operand : operand_suffixes) { for (tree type : type_suffixes) { for (tree predication : predication_suffixes) { tree name = build_function_name (group, operand, type, predication); tree function = build_function (name, group, operand, type, predication); apply_predication (function, predication); } } } }" GCC,riscv,42,"void build_one(int group, int oi, int pair, int bi, int pred_idx) { riscv_insn insn; insn.opcode = riscv_opcode_add; insn.rd = group; insn.rs1 = oi; insn.rs2 = pair; insn.bi = bi; insn.pred_idx = pred_idx; insn.target_specific = riscv_target_specific_21; riscv_build_insn(&insn); }" GCC,riscv,43,"bool check(tree *call) { if (call->type != RISCV_CALL) { error(""invalid call""); return false; } 
if (call->target_specific_value != RISCV_0_1_0_1) { error(""invalid target specific value""); return false; } if (call->recursive) { error(""recursive call""); return false; } if (call->function->type == RISCV_FUNCTION_UNDEFINED) { error(""call to undefined function""); return false; } if (call->function->type == RISCV_FUNCTION_EXPORTED) { error(""call to exported function""); return false; } if (call->function->type == RISCV_FUNCTION_IMPORTED) { error(""call to imported function""); return false; } if (call->function->type == RISCV_FUNCTION_BUILTIN) { error(""call to built-in function""); return false; } if (call->function->type == RISCV_FUNCTION_USER_DEFINED) { error(""call to user-defined function""); return false; }" GCC,riscv,44,"get_name (tree instance, bool overloaded_p) { const char *name; if (overloaded_p) name = ""__riscv_""; else name = ""vle""; return name; }" GCC,riscv,45,"add_all_one_mask_operand (rtx op, rtx mask) { if (GET_CODE (op) == CONST_INT) { op = plus_constant (op, mask); } else { rtx new_op = gen_rtx_PLUS (GET_MODE (op), op, mask); op = new_op; } XEXP (op, 0) = op; }" GCC,riscv,46,"add_attribute (const char *name, tree attrs) { if (name == NULL || attrs == NULL) return; if (strcmp (name, ""riscv"") == 0) { tree attr = build_tree_list (NULL_TREE, build_string (name)); TREE_CHAIN (attr) = attrs; attrs = attr; } tree_add_attribute (attrs, name); }" GCC,riscv,47,"add_fixed_operand (rtx_insn *insn, rtx operand) { if (MEM_P (operand) && MEM_INVALID_P (operand)) { rtx addr = XEXP (operand, 0); rtx new_addr = gen_rtx_MEM (GET_MODE (operand), addr); MEM_INVALID_P (new_addr) = 0; XEXP (operand, 0) = new_addr; } add_reg_note (insn, REG_EQUAL, operand); }" GCC,riscv,48,"add_function (tree fndecl, tree fntype, tree attrs, tree instance) { } int plugin_init (struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) { struct register_pass_info pass_info; pass_info.pass = add_function; pass_info.reference_pass_name = ""riscv""; pass_info.ref_pass_instance_number = 1; pass_info.pos_op = PASS_POS_INSERT_BEFORE; register_callback (pass_gate, NULL, &pass_info); return 0; }" GCC,riscv,49,"void add_input_operand (rtx op, rtx *m_ops, int *opno) { rtx new_op = gen_rtx_REG (op->mode, op->reg); m_ops[*opno] = new_op; (*opno)++; }" GCC,riscv,50,"add_integer_operand (rtx x, rtx y) { rtx result = gen_rtx_PLUS (SImode, x, y); emit_insn (result); }" GCC,riscv,51, GCC,riscv,52,"void add_output_operand (rtx operand, int opno) { rtx output_operand = gen_rtx_REG (operand->mode, opno); M_OPS = gen_rtx_EXPR_LIST (operand, M_OPS); opno++; }" GCC,riscv,53,"add_scalar_move_mask_operand (rtx op, rtx mask) { if (GET_CODE (op) == REG) { rtx reg = op; rtx mask_reg = gen_rtx_REG (GET_MODE (reg), mask); emit_insn (gen_rtx_SET (reg, mask_reg)); } else if (GET_CODE (op) == MEM) { rtx mem = op; rtx mask_mem = gen_rtx_MEM (GET_MODE (mem), mask); emit_insn (gen_rtx_SET (mem, mask_mem)); } else { } }" GCC,riscv,54," tree target_specific_value) { tree function = build_function (name, argument_types, return_type, target_specific_value); add_builtin_function (function, overload_name); }" GCC,riscv,55,"add_vector_type_attribute (tree type) { tree attrs = TYPE_ATTRIBUTES (type); tree riscv_attr = build_tree_list (get_identifier (""riscv""), build_string (strlen (""RVV type""), ""RVV type"")); TREE_CHAIN (attrs) = riscv_attr; }" GCC,riscv,56," enum rtx_code code, int *undef_p) { if (code == UNKNOWN) return; if (mode == VOIDmode) { if (REG_P (operand)) { if (REGNO (operand) >= FIRST_VIRTUAL_REGISTER) { rtx 
undef = gen_rtx_UNKNOWN (mode); XVECEXP (operand, 0, 0) = undef; *undef_p = 1; } } else if (GET_CODE (operand) == MEM) { if (MEM_VIRTUAL_P (operand)) { rtx undef = gen_rtx_UNKNOWN (mode); XVECEXP (operand, 0, 0) = undef; *undef_p = 1; } } } }" GCC,riscv,57,"allocate_argument_types (tree fndecl) { tree parm; tree arg_types; tree arg_type; int i; if (fndecl->decl_arguments == NULL_TREE) return; arg_types = make_tree_vec (fndecl->decl_arguments->length); for (i = 0; i < fndecl->decl_arguments->length; i++) { parm = TREE_VALUE (fndecl->decl_arguments->value[i]); arg_type = TREE_TYPE (parm); if (TREE_CODE (parm) == REGISTER_DECL) { arg_type = riscv_alloc_reg_type (arg_type); } else { arg_type = riscv_alloc_mem_type (arg_type); } TREE_VEC_ELT (arg_types, i) = arg_type; } fndecl->decl_arguments->type = arg_types; }" GCC,riscv,58,"append_base_name (const char *name) { char *base_name = concat (""__riscv_"", name); riscv_target_specific_value = concat (riscv_target_specific_value, base_name); }" GCC,riscv,59,"void append_name(const char *name) { const char *current_name = riscv_current_function_name(); char *new_name = malloc(strlen(current_name) + strlen(name) + 1); strcpy(new_name, current_name); strcat(new_name, name); riscv_set_function_name(new_name); free(new_name); }" GCC,riscv,60,"void append_sew(int sew) { char sew_str[10]; sprintf(sew_str, ""sew%d"", sew); __asm__(""append_sew %0"" : ""=r""(sew_str)); } int main() { append_sew(8); append_sew(16); append_sew(32); append_sew(64); return 0; }" GCC,riscv,61,"apply_mask_policy_p (rtx op, rtx mask) { if (mask) { op = gen_rtx_fmt_ee (GET_CODE (op), VOIDmode, op, mask); } emit_insn (op); }" GCC,riscv,62,apply_predication (tree argument_types) { if (TREE_CODE (argument_types) == PREDICATE_TYPE) { argument_types = riscv_apply_predicate (argument_types); } TREE_TYPE (argument_types) = argument_types; } GCC,riscv,63,"apply_tail_policy_p (rtx_insn *insn) { if (GET_CODE (insn) == CALL_INSN) return true; if (GET_CODE (insn) == JUMP_INSN) return true; if (GET_CODE (insn) == RETURN_INSN) return true; return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the RISC-V instruction set. It checks the instruction's opcode to determine if it is a call, jump, or return instruction, and if so, returns `true` to indicate that the tail policy operand should be applied. Otherwise, it returns `false`. You can use this function in the RISC-V backend of GCC by calling it in the appropriate places, such as in the `riscv_expand_insn` function, which is responsible for expanding instructions into RISC-V machine code. ```c #include ""config.h"" #include ""system.h"" #include ""riscv.h"" static void riscv_expand_insn (rtx_insn *insn) { if (apply_tail_policy_p (insn)) { ... } else { ... } }" GCC,riscv,64,"static inline void apply_vl_p(uint32_t vl) { riscv_set_vl(vl); } ``` This function takes a single argument `vl`, which is the value of the `vl` operand to be applied. It uses the `riscv_set_vl` function to set the `vl` operand to the specified value. You can use this function in your RISC-V backend code to apply the `vl` operand by default. 
For example: ```c #include #include int main() { apply_vl_p(16); return 0; }" GCC,riscv,65, GCC,riscv,66,"build_const_pointer (tree type, tree val) { tree ptr_type = build_pointer_type (type); tree const_ptr_type = build_pointer_type (build_qualified_type (type, TYPE_QUAL_CONST)); tree const_ptr = build_tree_list (NULL_TREE, const_ptr_type); TREE_TYPE (const_ptr) = const_ptr_type; tree ptr = build_tree_list (NULL_TREE, ptr_type); TREE_TYPE (ptr) = ptr_type; tree const_val = build_tree_list (NULL_TREE, val); TREE_TYPE (const_val) = const_ptr_type; tree const_ptr_val = build_tree_list (NULL_TREE, const_ptr); TREE_TYPE (const_ptr_val) = const_ptr_type; return const_ptr_val; }" GCC,riscv,67,builtin_decl (tree code) { tree decl; if (code == error_mark_node) return error_mark_node; decl = riscv_builtin_decl (code); if (decl == NULL) return error_mark_node; return decl; } GCC,riscv,68, GCC,riscv,69,"can_be_overloaded_p (void) { return true; } ``` This function simply returns the value `true` by default, indicating that most intrinsics can be overloaded. You can use this function in the RISC-V backend of GCC by defining it in a header file that is included in the backend's source code. For example, you could define it in a file called `riscv-intrinsics.h`: ``` #ifndef RISCV_INTRINSICS_H #define RISCV_INTRINSICS_H static bool can_be_overloaded_p (void) { return true; } #endif /* RISCV_INTRINSICS_H */ ``` Then, in the backend's source code, you can include this header file and use the `can_be_overloaded_p` function as needed: ``` #include ""riscv-intrinsics.h"" ... if (can_be_overloaded_p()) { } else { }" GCC,riscv,70,"bool check(tree *call) { if (!riscv_valid_instruction(call)) { error(""Invalid RISC-V instruction""); return false; } if (!riscv_valid_register(call)) { error(""Invalid RISC-V register""); return false; } if (!riscv_valid_immediate(call)) { error(""Invalid RISC-V immediate""); return false; } if (!riscv_valid_addressing_mode(call)) { error(""Invalid RISC-V addressing mode""); return false; } if (!riscv_valid_instruction_sequence(call)) { error(""Invalid RISC-V instruction sequence""); return false; } return true; }" GCC,riscv,71," int code) { if (TREE_CODE (fndecl) != FUNCTION_DECL || !DECL_BUILT_IN_CLASS (fndecl) || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD) return false; if (nargs != 1) { error_at (location, ""wrong number of arguments to %s"", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return false; } if (!integer_constant_expression_p (args[0])) { error_at (location, ""argument to %s must be an integer constant expression"", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return false; } if (!riscv_builtin_sve_p (fndecl)) { error_at (location, ""invalid SVE function %s"", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return false; } if (!riscv_builtin_sve_code_p (fndecl, code)) { error_at (location, ""invalid SVE function %s with code %d"", IDENTIFIER_POINTER (DECL_NAME (fndecl)), code); return false; }" GCC,riscv,72,check_required_extensions (void) { int i; for (i = 0; i < RVV_REQUIRE_COUNT; i++) { if (!riscv_target_specific_value_enabled (RVV_REQUIRE_EXTENSIONS[i])) return false; } return true; } GCC,riscv,73,"bool could_trap_p(tree *fn) { if (TREE_CODE(fn) == FUNCTION_DECL && lookup_attribute(""trap"", DECL_ATTRIBUTES(fn))) { return true; } if (TREE_CODE(fn) == FUNCTION_DECL && lookup_attribute(""trap_handler"", DECL_ATTRIBUTES(fn))) { return true; } if (TREE_CODE(fn) == FUNCTION_DECL && lookup_attribute(""trap_handler"", DECL_ATTRIBUTES(fn)) && 
!lookup_attribute(""default_trap_handler"", DECL_ATTRIBUTES(fn))) { return true; } if (TREE_CODE(fn) == FUNCTION_DECL && lookup_attribute(""trap_handler"", DECL_ATTRIBUTES(fn)) && !lookup_attribute(""default_trap_handler"", DECL_ATTRIBUTES(fn)) && !lookup_attribute(""empty_trap_handler_list"", DECL_ATTRIBUTES(fn))) { return true; } return false; }" GCC,riscv,74,"int equal(riscv_reg_t h1, riscv_reg_t h2) { return (h1 == h2); }" GCC,riscv,75,"expand (rtx call) { rtx lhs = XEXP (call, 0); rtx rhs = XEXP (call, 1); return lhs; }" GCC,riscv,76,"expand_builtin (tree exp, tree target, int code) { switch (code) { case RVV_ADD: return expand_builtin_add (exp, target); case RVV_SUB: return expand_builtin_sub (exp, target); case RVV_MUL: return expand_builtin_mul (exp, target); case RVV_DIV: return expand_builtin_div (exp, target); case RVV_REM: return expand_builtin_rem (exp, target); case RVV_AND: return expand_builtin_and (exp, target); case RVV_OR: return expand_builtin_or (exp, target); case RVV_XOR: return expand_builtin_xor (exp, target); case RVV_SHL: return expand_builtin_shl (exp, target); case RVV_SHR: return expand_builtin_shr (exp, target); case RVV_SAR: return expand_builtin_sar (exp, target); case RVV_ROL: return expand_builtin_rol (exp, target); case RVV_ROR: return expand_builtin_ror (exp, target); default: return NULL_TREE; } }" GCC,riscv,77,void finish_name (void) { char *name = get_name (); if (name) { name[0] = '\0'; name[1] = '\0'; } } GCC,riscv,78,"fold (tree_node *node) { tree folded_node; if (TREE_CODE (node) == CALL_EXPR) { tree callee = CALL_EXPR_FN (node); tree args = CALL_EXPR_ARGS (node); if (TREE_CODE (callee) == FUNCTION_DECL) { tree new_callee = riscv_fold_call (callee, args); if (new_callee) { folded_node = build_call_expr (new_callee, args); return folded_node; } } } return NULL_TREE; }" GCC,riscv,79,bool function_returns_void_p (tree fndecl) { if (TREE_CODE (fndecl) == FUNCTION_DECL && TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node) return false; if (TREE_CODE (fndecl) == FUNCTION_DECL && TREE_TYPE (TREE_TYPE (fndecl)) == riscv_specific_function_type_node) return true; return false; } GCC,riscv,80,"uint32_t generate_insn(uint32_t opcode, uint32_t operand1, uint32_t operand2) { uint32_t insn = 0; if (opcode == 0 || operand1 == 0 || operand2 == 0) { return 0; } insn = (opcode << 26) | (operand1 << 21) | (operand2 << 16); return operand1; }" GCC,riscv,81,"static const char *get_attributes(void) { return ""riscv,pure,const,nothrow,leaf""; }" GCC,riscv,82,get_mask_policy_for_pred (enum riscv_pred_type pred) { if (pred == RISCV_PRED_MU) return RISCV_MASK_POLICY_MU; else return RISCV_MASK_POLICY_DEFAULT; } GCC,riscv,83,get_tail_policy_for_pred(enum riscv_predicate pred) { if (pred == RISCV_PREDICATE_TU) return RISCV_TAIL_POLICY_TU; else return RISCV_TAIL_POLICY_DEFAULT; } GCC,riscv,84,enum rvv_arg_type_info::get_vector_type(int type) { switch (type) { case RVV_TYPE_V: return RVV_TYPE_V; case RVV_TYPE_VL: return RVV_TYPE_VL; case RVV_TYPE_VU: return RVV_TYPE_VU; case RVV_TYPE_VX: return RVV_TYPE_VX; case RVV_TYPE_VXL: return RVV_TYPE_VXL; case RVV_TYPE_VXU: return RVV_TYPE_VXU; default: return RVV_TYPE_INVALID; } } GCC,riscv,85, GCC,riscv,86,"void handle_pragma_vector(void) { int vector_size = riscv_vector_size(); if (vector_size < 0 || vector_size > 1023) { error(""Invalid vector size %d"", vector_size); return; } riscv_vector_set_target_specific_value(vector_size); }" GCC,riscv,87,"uint32_t hash(uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t ret_type) { uint32_t 
result = 0; result = (result << 1) ^ arg1; result = (result << 1) ^ arg2; result = (result << 1) ^ arg3; result = (result << 1) ^ ret_type; result = (result << 1) ^ (result >> 1); return result; }" GCC,riscv,88,has_merge_operand_p (rtx_insn *insn) { return true; } GCC,riscv,89,index_mode (tree type) { if (TREE_CODE (type) == INTEGER_TYPE) { if (TYPE_PRECISION (type) == 32) return SImode; else if (TYPE_PRECISION (type) == 64) return DImode; else return QImode; } else if (TREE_CODE (type) == REAL_TYPE) { if (TYPE_PRECISION (type) == 32) return SFmode; else if (TYPE_PRECISION (type) == 64) return DFmode; else return XFmode; } else if (TREE_CODE (type) == COMPLEX_TYPE) { if (TYPE_PRECISION (type) == 32) return SCmode; else if (TYPE_PRECISION (type) == 64) return DCmode; else return XCmode; } else { return QImode; } } GCC,riscv,90, GCC,riscv,91,lookup_vector_type_attribute (tree type) { if (TREE_CODE (type) != VECTOR_TYPE) return NULL; if (TREE_CODE (type) == RVV_TYPE) { return get_insn_attr_desc (type); } return NULL; } GCC,riscv,92,make_type_sizeless (tree type) { if (TREE_CODE (type) == INTEGER_TYPE) { TYPE_SIZELESS (type) = 1; TYPE_TARGET_SPECIFIC (type) = riscv_sizeless_type; } } GCC,riscv,93,mangle_builtin_type (tree type) { if (TREE_CODE (type) == RECORD_TYPE) { if (TYPE_RVV_ABI_BUILTIN (type)) { return TYPE_RVV_ABI_BUILTIN_NAME (type); } } return NULL_TREE; } GCC,riscv,94,mask_mode (tree type) { if (TREE_CODE (type) != MASK_TYPE) return VOIDmode; machine_mode mode = TYPE_MODE (type); if (mode < 0 || mode > MAX_MACHINE_MODE) return VOIDmode; return mode; } GCC,riscv,95,"bool modifies_global_state_p (tree fndecl) { if (TREE_CODE (fndecl) == FUNCTION_DECL && lookup_attribute (""modifies_global_state"", DECL_ATTRIBUTES (fndecl))) return true; if (TREE_CODE (fndecl) == FUNCTION_DECL && TREE_CODE (DECL_SAVED_TREE (fndecl)) == BLOCK) return true; if (TREE_CODE (fndecl) == FUNCTION_DECL && TREE_CODE (DECL_SAVED_TREE (fndecl)) == BLOCK && lookup_attribute (""modifies_global_state"", DECL_ATTRIBUTES (fndecl))) return true; return false; }" GCC,riscv,96,bool reads_global_state_p(void) { if (__builtin_riscv_has_side_effects()) { return true; } if (__builtin_riscv_accesses_global_variables()) { return true; } if (__builtin_riscv_accesses_shared_memory()) { return true; } if (__builtin_riscv_accesses_io_devices()) { return true; } if (__builtin_riscv_accesses_system_registers()) { return true; } return false; } GCC,riscv,97,register_builtin_type (tree type) { if (TREE_CODE (type) == RECORD_TYPE && TYPE_RISCV_ABI (type) == RISCV_ABI_RVV) { riscv_register_builtin_type (type); } } GCC,riscv,98, GCC,riscv,99,"void register_function_group(struct gcc_target *target, struct function_group *group) { struct function *func; for (func = group->functions; func; func = func->next) { if (func->target == target) { target->register_function(func); } } }" GCC,riscv,100,"void register_vector_type(TYPE type) { riscv_vector_register_type(type, ""riscv_vector.h""); }" GCC,riscv,101,"void report_non_ice(const char *location, const char *fndecl, int argno) { error(""argument %d of %qE must be an integer constant"", argno, fndecl); }" GCC,riscv,102,"void report_out_of_range(const char *fn_decl, int arg_no, int actual, int min, int max) { fprintf(stderr, ""Passing %d to argument %d of %s, which expects a value in the range [%d, %d]\n"", actual, arg_no, fn_decl, min, max); } ``` This function takes five arguments: * `fn_decl`: The name of the function being called. 
* `arg_no`: The number of the argument being passed (counting from zero). * `actual`: The actual value being passed as the argument. * `min`: The minimum value that the argument is expected to have. * `max`: The maximum value that the argument is expected to have. The function uses the `fprintf` function to print an error message to the standard error stream (stderr) indicating that the value being passed as the argument is out of range. The message includes the name of the function being called, the number of the argument being passed, the actual value being passed, and the expected range of values for the argument. You can use this function in your riscv backend of GCC Compiler by calling it whenever you detect that an argument is out of range. For example: ``` if (actual < min || actual > max) { report_out_of_range(fn_decl, arg_no, actual, min, max); }" GCC,riscv,103,required_extensions_p (tree type) { if (TREE_CODE (type) == RECORD_TYPE) { if (TYPE_REQUIRED_EXTENSIONS (type) != NULL_TREE) return true; } return false; } GCC,riscv,104,"void require_immediate(int argno, int *value_out) { if (argno < 0 || argno >= riscv_target_specific_value) { error(""Invalid argument number""); return; } if (!riscv_target_specific_value[argno].is_immediate) { error(""Argument is not an immediate""); return; } *value_out = riscv_target_specific_value[argno].value; }" GCC,riscv,105,"require_immediate_range (int min, int max, int rel_argno) { rtx op = XVECEXP (PATTERN (insn), 1, rel_argno); if (GET_CODE (op) != CONST_INT || INTVAL (op) < min || INTVAL (op) > max) error (""immediate operand %d is out of range [%d, %d]"", rel_argno, min, max); }" GCC,riscv,106,ret_mode (tree type) { switch (TREE_CODE (type)) { case VOID_TYPE: return VOIDmode; case INTEGER_TYPE: if (TYPE_PRECISION (type) <= 32) return SImode; else if (TYPE_PRECISION (type) <= 64) return DImode; else return TImode; case REAL_TYPE: if (TYPE_PRECISION (type) <= 32) return SFmode; else if (TYPE_PRECISION (type) <= 64) return DFmode; else return XFmode; case COMPLEX_TYPE: if (TYPE_PRECISION (type) <= 32) return SCmode; else if (TYPE_PRECISION (type) <= 64) return DCmode; else return XCmode; case VECTOR_TYPE: if (TYPE_PRECISION (type) <= 32) return V4SImode; else if (TYPE_PRECISION (type) <= 64) return V2DImode; else return V1TImode; default: gcc_unreachable (); } } GCC,riscv,107,sizeless_type_p (tree type) { return (type == RVV_SIZELSS_TYPE); } GCC,riscv,108,"use_compare_insn (rtx_insn *icode, rtx_insn *operands) { rtx_insn *insn = icode; rtx_insn *arg; int i; for (i = 0; i < XVECLEN (operands, 0); i++) { arg = XVECEXP (operands, 0, i); if (arg == NULL_RTX) continue; if (REG_P (arg)) insn = emit_insn (gen_rtx_SET (VOIDmode, arg, gen_rtx_REG (GET_MODE (arg), REGNO (arg)))); else { insn = emit_insn (gen_rtx_SET (VOIDmode, arg, gen_rtx_CONST_INT (GET_MODE (arg), INTVAL (arg)))); } } emit_insn (gen_rtx_CALL (VOIDmode, insn, XVECLEN (operands, 0))); }" GCC,riscv,109,"use_contiguous_load_insn (rtx_insn *insn) { if (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != SET || GET_CODE (SET_SRC (PATTERN (insn))) != MEM || GET_CODE (SET_DEST (PATTERN (insn))) != REG) return false; rtx mem = SET_SRC (PATTERN (insn)); rtx reg = SET_DEST (PATTERN (insn)); if (GET_MODE (mem) != GET_MODE (reg)) return false; if (GET_CODE (mem) == MEM && GET_CODE (XEXP (mem, 0)) == REG && REGNO (XEXP (mem, 0)) == REGNO (reg)) return true; return false; }" GCC,riscv,110, GCC,riscv,111,"void use_exact_insn(icode_t icode, int nargs, ...) 
{ va_list ap; int i; va_start(ap, nargs); for (i = 0; i < nargs; i++) { int arg = va_arg(ap, int); if (arg == riscv,0) { } else { } } va_end(ap); }" GCC,riscv,112,bool use_mask_predication_p(void) { return true; } GCC,riscv,113,"use_real_mask_p (void) { return TARGET_REAL_MASK; } ``` This function uses the `TARGET_REAL_MASK` macro, which is defined in the RISC-V backend of the GCC compiler, to determine whether predication is using a real mask operand. If `TARGET_REAL_MASK` is non-zero, then predication is using a real mask operand, and the function returns true. Otherwise, the function returns false. You can use this function in your code to determine whether predication is using a real mask operand for the RISC-V backend of the GCC compiler. For example: ``` if (use_real_mask_p()) { } else { }" GCC,riscv,114,"use_real_merge_p (rtx_insn *insn) { if (GET_CODE (insn) != INSN) return false; rtx_insn *pred = PATTERN (insn); if (GET_CODE (pred) != PARALLEL) return false; rtx_insn *merge = XVECEXP (pred, 0, 0); if (GET_CODE (merge) != INSN) return false; rtx_insn *set = PATTERN (merge); if (GET_CODE (set) != SET) return false; rtx_insn *src = SET_SRC (set); if (GET_CODE (src) != REG) return false; rtx_insn *dst = SET_DEST (set); if (GET_CODE (dst) != REG) return false; return true; }" GCC,riscv,115,"use_scalar_move_insn (rtx_insn *icode, rtx_insn *operands) { rtx_insn *insn = icode; rtx_insn *op = operands; for (int i = 0; i < icode->n_operands; i++) { rtx_insn *op_insn = op[i]; if (op_insn->type == RTX_REG) { rtx_insn *move_insn = riscv_emit_move_insn (icode, op_insn); insn = riscv_emit_insn (icode, move_insn); } else { rtx_insn *copy_insn = riscv_emit_copy_insn (icode, op_insn); insn = riscv_emit_insn (icode, copy_insn); } } insn = riscv_emit_call_insn (icode, insn); riscv_cleanup_insn_stream (icode); }" GCC,riscv,116,"use_ternop_insn (rtx_insn *icode, rtx_insn *operands) { if (icode->code != CODE_FOR_ternop) return; rtx_insn *op0 = icode->operands[0]; rtx_insn *op1 = icode->operands[1]; rtx_insn *op2 = icode->operands[2]; if (!op0 || !op1 || !op2) return; emit_insn (icode, op0, op1, op2); }" GCC,riscv,117, GCC,riscv,118,vector_mode (tree type) { if (TREE_CODE (type) != VECTOR_TYPE) return VOIDmode; switch (TREE_VECTOR_TYPE_KIND (type)) { case RISCV_VECTOR_TYPE_KIND_INT: return SImode; case RISCV_VECTOR_TYPE_KIND_FLOAT: return SFmode; default: return VOIDmode; } } GCC,riscv,119,"verify_type_context (tree type) { if (TREE_CODE (type) == ARRAY_TYPE || TREE_CODE (type) == VECTOR_TYPE) { if (TYPE_SIZE (type) == NULL_TREE || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) error (""RVV type %qT does not have a fixed size"", type); } if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) { if (TYPE_ALIGN (type) == 0) error (""RVV type %qT does not have a defined alignment"", type); }" GCC,riscv,120,"anticipatable_occurrence_p (rtx_insn *insn, rtx_insn *occurrence) { if (GET_CODE (insn) != CODE_FOR_vsetvl) return false; if (modified_between_p (insn, occurrence, XEXP (insn, 0)) || modified_between_p (insn, occurrence, XEXP (insn, 1))) return false; if (used_between_p (insn, occurrence, XEXP (insn, 0))) return false; return true; }" GCC,riscv,121,"bool any_set_in_bb_p (rtx_insn *insns, basic_block bb) { for (rtx_insn *insn = insns; insn != NULL; insn = insn->next) { if (insn->bb == bb) return true; } return false; }" GCC,riscv,122,"bool available_occurrence_p (rtx_insn *insn, rtx_insn *next_insn) { if (GET_CODE (insn) != VSETVL) return false; if 
(next_insn != NULL) return false; if (REG_NOTE_KIND (insn) == REG_NOTE_KIND_VSETVL_ZERO) { if (REG_NOTE_KIND (next_insn) == REG_NOTE_KIND_VSETVL_ZERO) return false; if (REG_NOTE_KIND (next_insn) == REG_NOTE_KIND_VSETVL_RS1) return false; } else if (REG_NOTE_KIND (insn) == REG_NOTE_KIND_VSETVL_RS1) { if (REG_NOTE_KIND (next_insn) == REG_NOTE_KIND_VSETVL_RS1) return false; }" GCC,riscv,123, GCC,riscv,124, GCC,riscv,125,"cleanup_illegal_dirty_blocks (void) { if (riscv_target_specific_value) { if (riscv_illegal_block_p ()) { riscv_cleanup_illegal_block (); } } } ``` This function checks if the current block is illegal using the `riscv_illegal_block_p` function, and if it is, it cleans up the illegal block using the `riscv_cleanup_illegal_block` function. The `riscv_target_specific_value` variable is used to determine if the current target is RISC-V. You can call this function in the RISC-V backend of GCC by adding a call to it in the appropriate place in the code. For example, you could call it in the `riscv_expand_block` function, which is called to expand a block of code: ``` void riscv_expand_block (rtx_insn *insn) { cleanup_illegal_dirty_blocks (); }" GCC,riscv,126,"int compare_lmul(int lmul1, int lmul2) { if (lmul1 == lmul2) { return 0; } else if (lmul1 < lmul2) { return -1; } else { return 1; } } ``` This function takes two `int` arguments `lmul1` and `lmul2`, and returns an `int` value indicating the result of the comparison. If `lmul1` is equal to `lmul2`, the function returns 0. If `lmul1` is less than `lmul2`, the function returns -1. If `lmul1` is greater than `lmul2`, the function returns 1. Here is an example of how you could use this function in a GCC backend for the RISC-V architecture: ``` #include int main() { int lmul1 = 1; int lmul2 = 2; int result = compare_lmul(lmul1, lmul2); if (result == 0) { printf(""LMUL1 == LMUL2\n""); } else if (result == -1) { printf(""LMUL1 < LMUL2\n""); } else { printf(""LMUL1 > LMUL2\n""); } return 0; }" GCC,riscv,127,"compatible_p (rtx_insn *insn, rtx_insn *this, rtx_insn *require) { if (!INSN_CODE (insn) == CODE_FOR_vector) return false; if (!this || !this->vsetvli) return false; if (!require || !require->vsetvli) return false; if (!compatible_p (require, this->vsetvli)) return false; if (!compatible_p (this->vsetvli, require)) return false; return true; }" GCC,riscv,128,"static void compute_local_backward_infos(struct gcc_plugin_info *plugin_info) { struct gcc_plugin_info_backward *backward_info; backward_info = plugin_info->backward_info; backward_info->target_specific_value = ""Unexpected Invalid demanded info""; }" GCC,riscv,129, GCC,riscv,130,"compute_probabilities (void) { basic_block bb; FOR_EACH_BB (bb) { int prob = 0; edge e; edge_iterator ei; FOR_EACH_EDGE (e, ei, bb->succs) { if (e->flags & EDGE_FALLTHRU) prob += e->probability; } bb->probability = prob; } }" GCC,riscv,131, GCC,riscv,132,"execute (void) { const char *target_specific_value = ""riscv,0,0,0,0""; return true; }" GCC,riscv,133,"extract_single_source (tree source) { if (TREE_CODE (source) != RTL_EXPR) return NULL_TREE; tree real_source = TREE_OPERAND (source, 0); if (TREE_CODE (real_source) != RTL_EXPR) return NULL_TREE; tree target = TREE_OPERAND (real_source, 0); if (TREE_CODE (target) != RTL_EXPR) return NULL_TREE; tree source_rtl = TREE_OPERAND (target, 0); if (TREE_CODE (source_rtl) != RTL_EXPR) return NULL_TREE; tree source_real = TREE_OPERAND (source_rtl, 0); if (TREE_CODE (source_real) != RTL_EXPR) return NULL_TREE; tree source_single = TREE_OPERAND (source_real, 
0); if (TREE_CODE (source_single) != RTL_EXPR) return NULL_TREE; return source_single; }" GCC,riscv,134,fault_first_load_p (rtx_insn *insn) { if (insn->code == CODE_FOR_fault_first_load) return true; else return false; } GCC,riscv,135, GCC,riscv,136,"gen_vsetvl_pat (rtx *operands, int icode) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; emit_insn (gen_rtx_SET (op0, gen_rtx_VSET (op1, op2, gen_rtx_REG (SImode, RISCV_V0_REGNUM)))); }" GCC,riscv,137,typedef struct { uint32_t id; uint32_t start_addr; uint32_t end_addr; uint32_t predecessor_count; uint32_t *predecessors; } cfg_bb; void get_all_predecessors(cfg_bb *cfg_bb) { uint32_t i; for (i = 0; i < cfg_bb->predecessor_count; i++) { cfg_bb *predecessor = cfg_bb->predecessors[i]; if (predecessor->id != cfg_bb->id) { get_all_predecessors(predecessor); } } } GCC,riscv,138,"static void get_all_sets(rtx_insn *insn, enum def_type def_type) { if (insn->def_type == def_type) { add_to_set(insn); } for (int i = 0; i < insn->num_operands; i++) { rtx_insn *op = insn->operands[i]; if (op->def_type == def_type) { add_to_set(op); } } } static void add_to_set(rtx_insn *insn) { insn->set = 1; }" GCC,riscv,139,get_avl (rtx op) { if (GET_CODE (op) == REG) return REGNO (op); else if (GET_CODE (op) == MEM) return MEM_ADDR (op); else return 0; } GCC,riscv,140,uint32_t get_default_ma(void) { return 0x1; } GCC,riscv,141,get_default_ta (void) { return TAIL_POLICY_RISCV; } GCC,riscv,142,static inline int get_ma(int mask_op_idx) { switch (mask_op_idx) { case 0: return RISCV_OPCODE_MASK_0; case 1: return RISCV_OPCODE_MASK_1; case 2: return RISCV_OPCODE_MASK_2; case 3: return RISCV_OPCODE_MASK_3; default: return -1; } } GCC,riscv,143,static inline int get_ratio(int mode) { switch (mode) { case RISCV_MODE_M: return 1; case RISCV_MODE_S: return 2; case RISCV_MODE_U: return 4; default: return 0; } } GCC,riscv,144,"static inline int get_sew(void) { int sew; asm volatile(""csrr %0, vtype"" : ""=r""(sew)); return sew; }" GCC,riscv,145,get_ta (uint64_t tail_op_idx) { uint64_t ta = 0; switch (tail_op_idx) { case 0: ta = TA_0; break; case 1: ta = TA_1; break; case 2: ta = TA_2; break; case 3: ta = TA_3; break; default: ta = TA_4; break; } return ta; } GCC,riscv,146,"int get_vl(void) { return riscv,0,0; }" GCC,riscv,147,vlmul_type get_vlmul(riscv_vector *vtype) { return vtype->vlmul; } GCC,riscv,148,get_vl_vtype_info (rtx_insn *insn) { int vl = INSN_VL (insn); int vtype = INSN_VTYPE (insn); if (vl == 0 && vtype == 0) return; if (vl == 0) vl = VL_DEFAULT; if (vtype == 0) vtype = VTYPE_DEFAULT; INSN_VL (insn) = vl; INSN_VTYPE (insn) = vtype; } GCC,riscv,149,"hard_empty_block_p (rtx_insn *insn) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == REG && GET_CODE (SET_DEST (PATTERN (insn))) == REG) { if (REGNO (SET_SRC (PATTERN (insn))) == REGNO (SET_DEST (PATTERN (insn)))) return true; } return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the RISC-V assembly code. It checks if the instruction is a `SET` instruction, and if the source and destination registers are the same. If they are, it returns `true`, indicating that the instruction is a hard empty block. Otherwise, it returns `false`. You can use this function in the `get_backward_fusion_type` function to check if an instruction is a hard empty block, and if so, disable the backward fusion optimization for that instruction. 
Here's an example of how you can modify the `get_backward_fusion_type` function to use the `hard_empty_block_p` function: ```c static enum fusion_type get_backward_fusion_type (rtx_insn *insn) { if (hard_empty_block_p (insn)) return FUSION_TYPE_NONE; }" GCC,riscv,150, GCC,riscv,151,"bool has_vl_op(const char *insn) { if (insn[0] == 'v' && insn[1] == 'l') { return true; } return false; } ``` This function takes a single argument `insn`, which is a string representing the instruction. It checks if the first two characters of the instruction are ""v"" and ""l"", which are the prefixes for RVV instructions that depend on the VL global status register. If the first two characters match, the function returns `true`, otherwise it returns `false`. You can use this function in your GCC backend to check if an instruction is an RVV instruction that depends on the VL global status register. For example: ``` if (has_vl_op(""vadd.vv"")) { }" GCC,riscv,152,"bool has_vtype_op(const char *op) { if (strncmp(op, ""riscv,0"", 6) == 0) { return true; } return false; } ``` This function takes a string `op` as an argument, which is the name of the instruction to check. It first checks if the instruction name starts with ""riscv,0"", which indicates that it is an RVV instruction that depends on the VTYPE global status register. If the instruction name starts with ""riscv,0"", the function returns true, otherwise it returns false. You can use this function in your GCC backend to check if an instruction is an RVV instruction that depends on the VTYPE global status register. For example: ``` if (has_vtype_op(""riscv,0,add"")) { }" GCC,riscv,153,bool ignore_vlmul_insn_p(const struct riscv_insn *insn) { if (insn->type != RISCV_INSN_TYPE_VECTOR) return false; if (insn->vtype.vlmul == 0) return true; return false; } GCC,riscv,154,"insert_insn_end_basic_block (basic_block bb, rtx expr) { rtx_insn *insn = emit_insn_after (expr, BB_END (bb)); BB_END (bb) = insn; }" GCC,riscv,155, GCC,riscv,156,loop_basic_block_p (const_rtx insn) { if (GET_CODE (insn) != JUMP_INSN) return false; if (GET_CODE (PATTERN (insn)) != LOOP_END) return false; if (GET_CODE (PATTERN (insn)) != BASIC_BLOCK) return false; if (GET_CODE (PATTERN (insn)) != LOOP_BEGIN) return false; if (GET_CODE (PATTERN (insn)) != REACHING_OUT) return false; return true; } GCC,riscv,157,mask_agnostic_p (rtx op) { if (GET_CODE (op) == REG) return REG_MASK_AGNOSTIC_P (op); else if (GET_CODE (op) == MEM) return MEM_MASK_AGNOSTIC_P (op); else return 0; } GCC,riscv,158,"bool need_vsetvl(dem_info curr_info, dem_info require, rtx insn) { if (curr_info.type != require.type) return true; if (curr_info.type == RISCV_TYPE_VSET) { if (require.type == RISCV_TYPE_VSET) { if (curr_info.vset.vl != require.vset.vl) return true; } else if (require.type == RISCV_TYPE_VL) { if (curr_info.vset.vl != require.vl) return true; } } else if (curr_info.type == RISCV_TYPE_VL) { if (require.type == RISCV_TYPE_VSET) { if (curr_info.vl != require.vset.vl) return true; } else if (require.type == RISCV_TYPE_VL) { if (curr_info.vl != require.vl) return true; } } return false; }" GCC,riscv,159,"prune_expressions (rtx_insn *insn) { int num = 0; rtx_insn *expr_list = NULL; rtx_insn *expr; for (expr = insn->expr; expr; expr = expr->next) { if (expr->code == CODE_EXPR && expr->operands[0]->code == CODE_EXPR && expr->operands[1]->code == CODE_EXPR) { expr_list = rtx_alloc (RTX_EXPR); expr_list->expr = expr; expr_list->next = NULL; num++; } } fprintf (stderr, ""Total VSETVL expression num = %d\n"", num); 
fprintf (stderr, ""Expression List:\n""); for (expr_list = expr_list; expr_list; expr_list = expr_list->next) { fprintf (stderr, ""Expr[%ld]:\n"", (long)expr_list->expr); fprintf (stderr, ""\n""); } }" GCC,riscv,160,read_vl_insn_p (rtx_insn *insn) { if (GET_CODE (insn) != INSN) return false; if (GET_MODE (insn) != VOIDmode) return false; if (GET_CODE (PATTERN (insn)) != SET) return false; if (GET_CODE (SET_SRC (PATTERN (insn))) != REG) return false; if (GET_CODE (SET_DEST (PATTERN (insn))) != REG) return false; if (GET_MODE (SET_SRC (PATTERN (insn))) != VOIDmode) return false; if (GET_MODE (SET_DEST (PATTERN (insn))) != VOIDmode) return false; if (GET_CODE (SET_SRC (PATTERN (insn))) == REG && GET_CODE (SET_DEST (PATTERN (insn))) == REG) return true; return false; } GCC,riscv,161, GCC,riscv,162,scalar_move_insn_p (rtx insn) { if (GET_CODE (insn) != INSN) return false; if (GET_CODE (PATTERN (insn)) != SET) return false; rtx dest = SET_DEST (PATTERN (insn)); rtx src = SET_SRC (PATTERN (insn)); if (GET_CODE (dest) != REG) return false; if (GET_CODE (src) != REG) return false; if (REGNO (dest) != REGNO (src)) return false; return true; } GCC,riscv,163, GCC,riscv,164,"bool support_relaxed_compatible_p(riscv_insn *insn) { if (insn->relaxed_compatible) return true; return false; } ``` This function takes a `riscv_insn` pointer as an argument, which represents the instruction to be checked. It then checks the `relaxed_compatible` field of the instruction, which indicates whether the instruction supports relaxed compatible check. If the field is set to true, the function returns true, indicating that the instruction supports relaxed compatible check. Otherwise, the function returns false. You can use this function in your GCC backend to check whether an instruction supports relaxed compatible check before attempting to relax it. For example: ``` if (support_relaxed_compatible_p(insn)) { } else { }" GCC,riscv,165,static inline uint32_t tail_agnostic_p(uint32_t op) { return (op & 0x1f) == 0x1f; } GCC,riscv,166, GCC,riscv,167, GCC,riscv,168,static inline int valid_sew_p(int sew) { return (sew == 8 || sew == 16 || sew == 32 || sew == 64); } GCC,riscv,169,"vector_config_insn_p (rtx_insn *insn) { if (GET_CODE (insn) != SET) return false; rtx dest = SET_DEST (insn); rtx src = SET_SRC (insn); if (GET_CODE (dest) != REG || GET_MODE (dest) != VOIDmode) return false; if (GET_CODE (src) != UNSPEC || XINT (src, 1) != UNSPEC_VSET) return false; return true; }" GCC,riscv,170, GCC,riscv,171,"vsetvl_discard_result_insn_p (rtx_insn *insn) { if (GET_CODE (insn) != SET) return false; rtx dest = SET_DEST (insn); rtx src = SET_SRC (insn); if (GET_CODE (dest) != REG || GET_MODE (dest) != VOIDmode) return false; if (GET_CODE (src) != VSET) return false; rtx vset_src = XEXP (src, 0); if (GET_CODE (vset_src) != REG || GET_MODE (vset_src) != VOIDmode) return false; if (REGNO (vset_src) != V0_REG) return false; return true; }" GCC,riscv,172,"vsetvl_insn_p (rtx_insn *insn) { if (GET_CODE (insn) == SET) { rtx dest = SET_DEST (insn); rtx src = SET_SRC (insn); if (GET_CODE (dest) == REG && GET_CODE (src) == REG) { int dest_regno = REGNO (dest); int src_regno = REGNO (src); if (dest_regno == src_regno) return true; } } return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the RISC-V instruction set. It first checks if the instruction is a `SET` instruction, and if so, it extracts the destination and source registers. 
If the destination and source registers are the same, the function returns true. Otherwise, it returns false. You can use this function in your RISC-V backend of GCC to determine if an instruction is a `vsetvldi` or `vsetvlsi` instruction. For example, you can use it in a function that generates the assembly code for a particular instruction, like this: ``` if (vsetvl_insn_p (insn)) { } else { }" GCC,riscv,173,"add_all_one_mask_operand (rtx op, rtx mask) { if (GET_CODE (op) == CONST_INT) { op = plus_constant (op, mask); } else { rtx new_op = gen_rtx_PLUS (GET_MODE (op), op, mask); op = new_op; } XEXP (op, 0) = op; }" GCC,riscv,174,"void add_input_operand (rtx op, rtx *m_ops, int *opno) { rtx new_op = gen_rtx_REG (op->mode, op->reg); m_ops[*opno] = new_op; (*opno)++; }" GCC,riscv,175,"void add_output_operand (rtx operand, int opno) { rtx output_operand = gen_rtx_REG (operand->mode, opno); M_OPS = gen_rtx_EXPR_LIST (operand, M_OPS); opno++; }" GCC,riscv,176," enum rtx_code code, int *undef_p) { if (code == UNKNOWN) return; if (mode == VOIDmode) { if (REG_P (operand)) { if (REGNO (operand) >= FIRST_VIRTUAL_REGISTER) { rtx undef = gen_rtx_UNKNOWN (mode); XVECEXP (operand, 0, 0) = undef; *undef_p = 1; } } else if (GET_CODE (operand) == MEM) { if (MEM_VIRTUAL_P (operand)) { rtx undef = gen_rtx_UNKNOWN (mode); XVECEXP (operand, 0, 0) = undef; *undef_p = 1; } } } }" GCC,riscv,177,"int calculate_ratio(int sew, int lmul) { int ratio; ratio = sew / lmul; return ratio; }" GCC,riscv,178,"bool const_vec_all_same_in_range_p (const_vector x, int minval, int maxval) { if (!x.is_const_vector()) return false; for (int i = 0; i < x.size(); i++) { if (x[i] != x[0]) return false; } for (int i = 0; i < x.size(); i++) { if (x[i] < minval || x[i] > maxval) return false; } return true; }" GCC,riscv,179,"emit_hard_vlmax_vsetvl (rtx_insn *insn) { rtx op0 = insn->operands[0]; rtx op1 = insn->operands[1]; rtx op2 = insn->operands[2]; emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_VEC_MAX (V4SImode, op1, op2))); emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_VEC_SET (V4SImode, op0, gen_rtx_VEC_MAX (V4SImode, op1, op2)))); }" GCC,riscv,180,"emit_pred_op (rtx dest, rtx src, int mask) { rtx temp; temp = gen_rtx_REG (SImode, mask); emit_insn (gen_rtx_SET (dest, gen_rtx_AND (SImode, src, temp))); }" GCC,riscv,181,"expand (rtx call) { rtx lhs = XEXP (call, 0); rtx rhs = XEXP (call, 1); return lhs; }" GCC,riscv,182,"static inline void gen_scalar_move_mask(rtx dest, rtx src, int mask) { int i; for (i = 0; i < 32; i++) { if (mask & (1 << i)) { emit_insn(gen_movsi(dest, src)); break; } } }" GCC,riscv,183,get_avl_type_rtx (void) { rtx avl_type = riscv_target_avl_type (); if (avl_type == NULL_RTX) avl_type = riscv_default_avl_type (); return avl_type; } GCC,riscv,184,static inline int get_ma(int mask_op_idx) { switch (mask_op_idx) { case 0: return RISCV_OPCODE_MASK_0; case 1: return RISCV_OPCODE_MASK_1; case 2: return RISCV_OPCODE_MASK_2; case 3: return RISCV_OPCODE_MASK_3; default: return -1; } } GCC,riscv,185,get_prefer_mask_policy (void) { enum riscv_mask_policy policy = RISCV_MASK_POLICY_NONE; if (TARGET_RISCV_MASK_POLICY) { policy = TARGET_RISCV_MASK_POLICY; } else if (TARGET_RISCV_MASK_POLICY_DEFAULT) { policy = TARGET_RISCV_MASK_POLICY_DEFAULT; } return policy; } GCC,riscv,186,"get_prefer_tail_policy(struct riscv_backend *backend) { return backend->prefer_tail_policy; } ``` This function takes a pointer to a `struct riscv_backend` as an argument, and returns the `prefer_tail_policy` field of that structure. 
The `prefer_tail_policy` field is an enumeration that specifies the preferred tail policy for the backend. You can use this function to get the preferred tail policy for a given backend, and then use that information to generate code that is optimized for that policy. For example: ```c #include ""riscv-backend.h"" static void generate_code(struct riscv_backend *backend) { enum riscv_tail_policy policy = get_prefer_tail_policy(backend); if (policy == RISCV_TAIL_POLICY_TAIL_CALL) { } else if (policy == RISCV_TAIL_POLICY_TAIL_JUMP) { } else { } }" GCC,riscv,187,get_ratio (void) { int ratio; switch (riscv_machine_mode) { case RISCV_MACHINE_MODE_32: ratio = 32; break; case RISCV_MACHINE_MODE_64: ratio = 64; break; default: ratio = 0; break; } return ratio; } GCC,riscv,188,"static inline int get_sew(void) { return riscv_8; } ``` This function is marked as `static inline` to indicate that it is a helper function that is not intended to be called directly by the user. The `riscv_8` value is a target-specific value that represents the scalar element width for RVV instructions. To use this function, you can call it from within a RVV instruction implementation, like this: ``` void riscv_vadd_vv(void) { int sew = get_sew(); }" GCC,riscv,189,get_ta (uint64_t tail_op_idx) { uint64_t ta = 0; switch (tail_op_idx) { case 0: ta = TA_0; break; case 1: ta = TA_1; break; case 2: ta = TA_2; break; case 3: ta = TA_3; break; default: ta = TA_4; break; } return ta; } GCC,riscv,190,"enum rvv_vector_mode get_vector_mode(unsigned int nunits, enum rvv_vector_mode inner_mode) { switch (inner_mode) { case RVV_MODE_V: return RVV_MODE_V; case RVV_MODE_V8: return RVV_MODE_V8; case RVV_MODE_V16: return RVV_MODE_V16; case RVV_MODE_V32: return RVV_MODE_V32; case RVV_MODE_V64: return RVV_MODE_V64; case RVV_MODE_V128: return RVV_MODE_V128; case RVV_MODE_V256: return RVV_MODE_V256; case RVV_MODE_V512: return RVV_MODE_V512; case RVV_MODE_V1024: return RVV_MODE_V1024; default: return RVV_MODE_V; } }" GCC,riscv,191,uint32_t get_vlmul(uint32_t vtype) { uint32_t vlmul; if (vtype & 0x80000000) { vlmul = (vtype >> 24) & 0x7; } else { vlmul = 0; } return vlmul; } GCC,riscv,192,uint64_t get_vl_x2_rtx(uint64_t vl) { return (vl << 1) | (vl >> 1); } GCC,riscv,193,"legitimize_move (rtx src, rtx dest) { if (!TARGET_RVV || !TARGET_RVV_FRACTIONAL_VECTOR_MODE) return; rtx temp = gen_reg_rtx (GET_MODE (dest)); emit_insn (gen_rtx_SET (VOIDmode, temp, src)); emit_insn (gen_rtx_SET (VOIDmode, dest, temp)); }" GCC,riscv,194,"int riscv_address_insns(machine_mode mode, rtx x, bool might_split_p) { int num_insns = 0; if (mode == VOIDmode) return 0; if (might_split_p) { for (int i = 0; i < GET_MODE_SIZE(mode); i += 4) { if (!riscv_address_valid_p(x, i, mode)) return 0; } } else { if (!riscv_address_valid_p(x, 0, mode)) return 0; } num_insns = riscv_address_insns_1(mode, x, might_split_p); return num_insns; }" GCC,riscv,195,"int riscv_binary_cost(rtx x, int single_insns, int double_insns) { int cost = 0; if (GET_MODE_SIZE(GET_MODE(x)) <= UNITS_PER_WORD) cost = single_insns; else cost = double_insns; if (GET_MODE_SIZE(GET_MODE(x)) > UNITS_PER_WORD) cost *= GET_MODE_SIZE(GET_MODE(x)) / UNITS_PER_WORD; return cost; }" GCC,riscv,196,"void riscv_block_move_loop(void *dest, const void *src, uint32_t length, uint32_t bytes_per_iter) { uint32_t i; for (i = 0; i < length; i += bytes_per_iter) { uint32_t *dest_ptr = (uint32_t *)dest + i; uint32_t *src_ptr = (uint32_t *)src + i; *dest_ptr = *src_ptr; } }" GCC,riscv,197,"int riscv_build_integer(rtx_insn *codes, int 
value) { int num_ops = 0; rtx_insn *load_insn = riscv_emit_load_imm(codes, value); num_ops++; return num_ops; }" GCC,riscv,198,"riscv_cannot_force_const_mem (rtx op, enum machine_mode mode) { if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == SYMBOL_REF) { if (SYMBOL_REF_CONSTANT_P (XEXP (op, 0))) { if (riscv_valid_const_mem_ref_p (op, mode)) return false; } } return true; } bool riscv_cannot_force_const_mem (rtx op, enum machine_mode mode) { return riscv_cannot_force_const_mem (op, mode); } ``` This function checks if the operand is a constant memory reference, and if so, it checks if the symbol is a constant and if the memory reference is a valid constant memory reference. If the memory reference is not a valid constant memory reference, the function returns `true`, indicating that the operand cannot be forced to be a constant memory reference. You can then use this function in your GCC backend to check if an operand can be forced to be a constant memory reference. For example: ``` if (riscv_cannot_force_const_mem (operand, mode)) { error (""operand %d cannot be forced to be a constant memory reference"", operand); return false; }" GCC,riscv,199,riscv_can_use_return_insn (rtx_insn *insn) { if (insn->epilogue == NULL) return true; if (insn->frame_pointer != NULL) return false; for (rtx_insn *insn2 = insn->next; insn2 != NULL; insn2 = insn2->next) { if (insn2->code == CODE_FOR_stack_adjust) return false; } return true; } GCC,riscv,200,"bool riscv_classify_address(rtx x, machine_mode mode, struct riscv_address_info *info, bool strict_p) { if (!riscv_valid_address_p(x, mode)) return false; info->mode = mode; info->address = x; info->strict_p = strict_p; return true; }" GCC,riscv,201,riscv_class_max_nregs (void) { return riscv_target_info.max_nregs; } GCC,riscv,202," struct riscv_bb *bb) { } ``` This function takes two arguments: `components`, which is a pointer to a `struct riscv_components` structure that contains the components of the RISC-V backend, and `bb`, which is a pointer to a `struct riscv_bb` structure that represents a basic block. The function is responsible for implementing the `TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB` macro, which is used to shrink-wrap the components of the RISC-V backend for a specific basic block. To implement this function, you will need to use the `riscv_components` structure to access the components of the RISC-V backend, and the `riscv_bb` structure to access the basic block. You can then use the `riscv_components` structure to implement the shrink-wrap operation for the basic block. 
Here is an example of how you could implement the `riscv_components_for_bb` function: ``` #include ""riscv-components.h"" static void riscv_components_for_bb(struct riscv_components *components, struct riscv_bb *bb) { riscv_components_shrink_wrap(components, bb); }" GCC,riscv,203, GCC,riscv,204,int riscv_const_insns(int x) { if (x == 0) { return 0; } else if (x == 1) { return 4; } else if (x == 2) { return 1; } else if (x == 3) { return 0; } else if (x == 4) { return 4; } else if (x == 5) { return 1; } else if (x == 6) { return 0; } else if (x == 7) { return 0; } else if (x == 8) { return 0; } else if (x == 9) { return 0; } else if (x == 10) { return 4; } else if (x == 11) { return 1; } else if (x == 12) { return 0; } else if (x == 13) { return 0; } else if (x == 14) { return 0; } else if (x == 15) { return 0; } else { return 0; } } GCC,riscv,205,static int riscv_convert_vector_bits(int arch) { switch (arch) { case riscv32: return 32; case riscv64: return 64; default: return 0; } } GCC,riscv,206, GCC,riscv,207,"riscv_dwarf_poly_indeterminate_value (tree type) { if (TREE_CODE (type) == POLY_TYPE) { return build_int_cst (TREE_TYPE (type), 0); } else { return build_int_cst (type, 0); } }" GCC,riscv,208, GCC,riscv,209,static void riscv_emit_epilogue_components(rtx_insn *epilogue) { rtx_insn *insn; rtx_insn *last_insn; for (insn = epilogue; insn; insn = NEXT_INSN(insn)) { if (GET_CODE(insn) == CODE_LABEL) { emit_label(insn); } else if (GET_CODE(insn) == CODE_INSN) { emit_insn(insn); } else if (GET_CODE(insn) == CODE_CLOBBER) { emit_clobber(insn); } } last_insn = insn; emit_insn(last_insn); } GCC,riscv,210," bool *invert_ptr) { riscv_emit_cmp(cmp0, cmp1, code); riscv_emit_branch(target, code); if (invert_ptr) { riscv_emit_xor(target, target, 1); *invert_ptr = true; } }" GCC,riscv,211,void riscv_emit_prologue_components(struct riscv_prologue *prologue) { riscv_emit_prologue_header(prologue); riscv_emit_prologue_body(prologue); riscv_emit_prologue_footer(prologue); } void riscv_emit_prologue_header(struct riscv_prologue *prologue) { riscv_emit_prologue_header_start(prologue); riscv_emit_prologue_header_body(prologue); riscv_emit_prologue_header_end(prologue); } void riscv_emit_prologue_header_start(struct riscv_prologue *prologue) { riscv_emit_prologue_header_start_label(prologue); riscv_emit_prologue_header_start_code(prologue); } void riscv_emit_prologue_header_start_label(struct riscv_prologue *prologue) { riscv_emit_prologue_header_start_label_name(prologue); } GCC,riscv,212,static void riscv_excess_precision(void) { if (__riscv_xlen == 32) { FLT_EVAL_METHOD = 0; } else if (__riscv_xlen == 64) { FLT_EVAL_METHOD = 1; } else if (__riscv_xlen == 128) { FLT_EVAL_METHOD = 2; } } GCC,riscv,213,"int riscv_expand_conditional_move(rtx op, rtx cons, rtx alt, rtx dest) { if (op == 0) return 0; if (op == 1) return 1; if (cons == 0) return 0; if (alt == 0) return 0; if (dest == 0) return 0; if (op == 1) return 1; if (cons == 1) return 1; if (alt == 1) return 1; if (dest == 1) return 1; return 0; }" GCC,riscv,214,"riscv_expand_conditional_move_onesided (rtx operands[3]) { rtx cond = operands[0]; rtx dest = operands[1]; rtx src = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_IF_THEN_ELSE (VOIDmode, cond, src, dest))); }" GCC,riscv,215,"static void riscv_expand_mult_with_const_int(rtx op0, rtx op1, rtx op2, rtx op3, rtx op4, rtx op5, rtx op6, rtx op7, rtx op8, rtx op9) { if (!riscv_m_extension_enabled_p()) return; poly_int_pod poly_size_offset = riscv_poly_int_size_offset(op0, op1, op2, op3, 
op4, op5, op6, op7, op8, op9); rtx result = riscv_expand_mult(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, poly_size_offset); return result; }" GCC,riscv,216, GCC,riscv,217,static void riscv_expand_prologue (void) { emit_insn (gen_prologue ()); emit_insn (gen_riscv_target_specific ()); emit_insn (gen_epilogue ()); } GCC,riscv,218,"static inline void riscv_extend_comparands(rtx op0, rtx op1) { if (GET_MODE(op0) == QImode) { op0 = gen_rtx_ZERO_EXTEND(HImode, op0); } else if (GET_MODE(op0) == HImode) { op0 = gen_rtx_SIGN_EXTEND(SImode, op0); } if (GET_MODE(op1) == QImode) { op1 = gen_rtx_ZERO_EXTEND(HImode, op1); } else if (GET_MODE(op1) == HImode) { op1 = gen_rtx_SIGN_EXTEND(SImode, op1); } }" GCC,riscv,219,"void riscv_file_start(void) { fprintf(asm_out_file, ""\t.option %spic\n"", riscv_option_pic); fprintf(asm_out_file, ""\t.option norelax\n""); fprintf(asm_out_file, ""\t.option csr-check\n""); }" GCC,riscv,220,int riscv_first_stack_step(int spill_regs) { int stack_size = 0; stack_size += 8; stack_size += spill_regs; stack_size = (stack_size + 15) & ~15; return stack_size; } GCC,riscv,221,"riscv_floatn_mode (void) { riscv_floatn_mode_set (riscv, 16); } ``` This function is called by the GCC backend to set the target specific value for the RISC-V backend. It uses the `riscv_floatn_mode_set` function to set the value to 16, which is the value for the RISC-V backend. You can also use the `riscv_floatn_mode_set` function to set the value to a different value if needed. For example, if you want to set the value to 32, you can use the following code: ``` #include ""config.h"" #include ""system.h"" #include ""riscv-floatn.h"" static void riscv_floatn_mode (void) { riscv_floatn_mode_set (riscv, 32); } ``` This will set the value to 32, which is the value for the RISC-V backend. You can also use the `riscv_floatn_mode_set` function to set the value to a different value if needed. 
For example, if you want to set the value to 64, you can use the following code: ``` #include ""config.h"" #include ""system.h"" #include ""riscv-floatn.h"" static void riscv_floatn_mode (void) { riscv_floatn_mode_set (riscv, 64); }" GCC,riscv,222,"void riscv_for_each_saved_reg(void (*fn)(int reg, int sp_offset)) { int reg; int sp_offset; for (reg = 0; reg < RISCV_NUM_REGS; reg++) { if (riscv_reg_is_saved(reg)) { sp_offset = riscv_reg_offset(reg); fn(reg, sp_offset); } } }" GCC,riscv,223,static void riscv_get_separate_components(struct riscv_separate_components *components) { components->riscv_separate_components_init(); components->riscv_separate_components_get(); } GCC,riscv,224, GCC,riscv,225,int riscv_hard_regno_nregs(int regno) { switch (regno) { case 0: return 1; case 1: return 1; case 2: return 1; case 3: return 1; case 4: return 1; case 5: return 1; case 6: return 1; case 7: return 1; case 8: return 1; case 9: return 1; case 10: return 1; case 11: return 1; case 12: return 1; case 13: return 1; case 14: return 1; case 15: return 1; case 16: return 1; case 17: return 1; case 18: return 1; case 19: return 1; case 20: return 1; case 21: return 1; case 22: return 1; case 23: return 1; case 24: return 1; case 25: return 1; case 26: return 1; case 27: return 1; case 28: return 1; case 29: return 1; case 30: return 1; case 31: return 1; default: return 0; } } GCC,riscv,226,"riscv_initial_elimination_offset (rtx from, rtx to) { if (from == frame_pointer_rtx || from == arg_pointer_rtx) { if (to == stack_pointer_rtx || to == hard_frame_pointer_rtx) { } } }" GCC,riscv,227,"riscv_is_eh_return_data_register (unsigned int regno) { return (regno == RISCV_REG_EH_RETURN_DATA); } ``` In this function, we use the `RISCV_REG_EH_RETURN_DATA` macro from the `riscv.h` header file to check if the provided register number is equal to the eh return data register. If it is, the function returns TRUE. You can use this function in your GCC backend to check if a register is an eh return data register. 
For example: ``` if (riscv_is_eh_return_data_register (regno)) { }" GCC,riscv,228,"riscv_legitimize_poly_move (rtx dest, rtx src, rtx scratch) { if (GET_CODE (src) == CONST_POLY_INT) { rtx temp = gen_reg_rtx (GET_MODE (dest)); emit_insn (gen_movsi (temp, src)); emit_insn (gen_movsi (dest, temp)); } else { emit_insn (gen_movsi (dest, src)); } }" GCC,riscv,229,riscv_libgcc_floating_mode_supported_p (machine_mode mode) { if (mode == HFmode) return true; else return targetm.libgcc_floating_mode_supported_p (mode); } GCC,riscv,230,"int riscv_load_store_insns(rtx_insn *insn, rtx_insn *mem) { int num_insns = 0; if (GET_CODE(insn) == MEM) { if (GET_CODE(mem) == MEM) { num_insns = 2; } else { num_insns = 1; } } else { num_insns = 0; } return num_insns; }" GCC,riscv,231,"static const char *riscv_mangle_type(tree type) { if (TREE_CODE(type) == VECTOR_TYPE) { if (TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE) { if (TYPE_PRECISION(TREE_TYPE(type)) == 16) { return ""DF16_""; } } } return NULL; }" GCC,riscv,232,"static void riscv_move_integer(rtx dest, rtx value, rtx temp) { emit_insn(gen_rtx_SET(dest, value)); if (temp) riscv_force_temporary(temp); }" GCC,riscv,233,"static inline int riscv_next_saved_reg(int regno, int limit, int offset, int inc) { if (regno >= limit) return INVALID_REGNUM; if (inc) regno++; if (regno >= limit) return INVALID_REGNUM; offset += regno; return regno; }" GCC,riscv,234," const_tree type, bool named_arg) { if (AGGREGATE_TYPE_P (type) || (GET_MODE_SIZE (mode) > 8 && GET_MODE_CLASS (mode) == MODE_INT)) return true; return false; }" GCC,riscv,235, GCC,riscv,236,"enum riscv_promote_function_mode { RISCV_PROMOTE_FUNCTION_MODE_ALWAYS_PROMOTE, RISCV_PROMOTE_FUNCTION_MODE_NEVER_PROMOTE, RISCV_PROMOTE_FUNCTION_MODE_PROMOTE_IF_NEEDED }; static enum riscv_promote_function_mode riscv_promote_function_mode (tree type) { if (type == NULL_TREE) return RISCV_PROMOTE_FUNCTION_MODE_ALWAYS_PROMOTE; if (TREE_CODE (type) == INTEGER_TYPE) { if (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) return RISCV_PROMOTE_FUNCTION_MODE_PROMOTE_IF_NEEDED; else return RISCV_PROMOTE_FUNCTION_MODE_NEVER_PROMOTE; } return RISCV_PROMOTE_FUNCTION_MODE_ALWAYS_PROMOTE; }" GCC,riscv,237,"static int riscv_register_move_cost(int class, int mode, int reg1, int reg2) { int cost = 0; if (class == reg1 && class == reg2) { cost = 0; } else { cost = 1; } return cost; }" GCC,riscv,238,riscv_regmode_natural_size (void) { return 64; } GCC,riscv,239,"void riscv_report_v_required(const char *operation) { fprintf(stderr, ""Error: %s requires the RVV ISA extension, but it is disabled.\n"", operation); fprintf(stderr, ""You can enable RVV using the command-line option '-march=rvv' or by using the 'target' attribute or pragma.\n""); exit(1); } ``` This function takes a single argument, `operation`, which is the name of the operation that requires the vector extension. It prints an error message to `stderr` indicating that the operation requires the RVV ISA extension, but it is disabled. It also suggests how to enable the RVV ISA extension using the command-line option `-march=rvv` or by using the `target` attribute or pragma. Finally, it exits the program with a non-zero exit code. You can use this function in your GCC backend to report when a vector operation is required but the vector extension is disabled. 
For example: ``` if (riscv_vector_enabled()) { } else { riscv_report_v_required(""vector operation""); }" GCC,riscv,240,riscv_scalar_mode_supported_p (scalar_mode mode) { switch (mode) { case SImode: case HImode: case QImode: return true; default: return false; } } GCC,riscv,241,"riscv_secondary_memory_needed (rtx op, enum machine_mode mode) { if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE_SIZE (mode) > GET_MODE_SIZE (SImode)) return true; return false; }" GCC,riscv,242," tree va_list_type) { if (TARGET_RISCV) { tree arg_types = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); tree new_arg_types = NULL_TREE; for (int i = 0; i < TREE_VEC_LENGTH (arg_types); i++) { tree arg_type = TREE_VEC_ELT (arg_types, i); tree new_arg_type = NULL_TREE; if (TREE_CODE (arg_type) == REFERENCE_TYPE) { new_arg_type = build_reference_type (TREE_TYPE (arg_type)); } else if (TREE_CODE (arg_type) == POINTER_TYPE) { new_arg_type = build_pointer_type (TREE_TYPE (arg_type)); } else { new_arg_type = arg_type; } new_arg_types = tree_cons (NULL_TREE, new_arg_type, new_arg_types); }" GCC,riscv,243,"void riscv_set_return_address(word_mode_gpr ADDRESS, word_mode_gpr SCRATCH) { asm volatile(""mv %0, %1\n\t"" ""mv %2, %3\n\t"" ""mv %4, %5\n\t"" : ""=r"" (ADDRESS), ""=r"" (SCRATCH) : ""r"" (ADDRESS), ""r"" (SCRATCH) : ""memory""); }" GCC,riscv,244,"riscv_shamt_matches_mask_p (unsigned int shamt, unsigned int mask) { return (shamt & mask) == shamt; }" GCC,riscv,245,"riscv_split_doubleword_move (rtx dest, rtx src, rtx scratch) { if (TARGET_64BIT) { if (GET_CODE (dest) == REG && REGNO (dest) < FIRST_PSEUDO_REGISTER) { rtx dest_lo = gen_rtx_REG (DImode, REGNO (dest)); rtx dest_hi = gen_rtx_REG (DImode, REGNO (dest) + 1); rtx src_lo = gen_rtx_REG (DImode, REGNO (src)); rtx src_hi = gen_rtx_REG (DImode, REGNO (src) + 1); emit_insn (gen_movdi (dest_lo, src_lo)); emit_insn (gen_movdi (dest_hi, src_hi)); } else { rtx dest_lo = gen_rtx_MEM (DImode, dest); rtx dest_hi = gen_rtx_MEM (DImode, plus_constant (dest, 8)); rtx src_lo = gen_rtx_MEM (DImode, src); rtx src_hi = gen_rtx_MEM (DImode, plus_constant (src, 8)); emit_insn (gen_movdi (dest_lo, src_lo)); emit_insn (gen_movdi (dest_hi, src_hi)); }" GCC,riscv,246,"static void riscv_stack_align(poly_int_pod *stack_align) { if (stack_align->is_constant) { stack_align->value = stack_align->constant_value; } else { stack_align->value = 16; } } ``` This function takes a `poly_int_pod` pointer as an argument, which represents the stack alignment for the RISC-V backend of GCC. The function checks if the stack alignment is a constant, and if so, uses the constant value. If the stack alignment is not a constant, the function uses the default value of 16. You can use this function in your GCC backend code to handle stack alignment for RISC-V targets. For example, you can call this function in the `riscv_stack_align` function, which is called by the `riscv_stack_align` function in the GCC backend. 
``` #include ""poly_int.h"" static void riscv_stack_align(poly_int_pod *stack_align) { riscv_stack_align(stack_align); }" GCC,riscv,247, target_specific_value target_specific_value) { if (mode == LO_SUM) { if (sym_type == SYM_TYPE_RISCV) { if (target_specific_value == 0 || target_specific_value == 1 || target_specific_value == 2) { return true; } } } return false; } GCC,riscv,248,"bool riscv_valid_offset_p(rtx x, machine_mode mode) { if (GET_CODE(x) == CONST_INT) { HOST_WIDE_INT offset = INTVAL(x); if (offset >= 0 && offset < 16) return true; } if (GET_CODE(x) == REG) { int regno = REGNO(x); if (regno >= 0 && regno < 32) return true; } return false; }" GCC,riscv,249,static unsigned int riscv_vector_alignment(tree type) { if (TREE_CODE(type) == VECTOR_TYPE) { return 8; } else { return TARGET_DEFAULT_ALIGNMENT; } } GCC,riscv,250,riscv_vector_mode_supported_p (machine_mode mode) { if (mode == VOIDmode) return false; if (mode == BLKmode) return true; if (mode == V4SImode || mode == V4SFmode) return true; if (mode == V2DImode || mode == V2DFmode) return true; return false; } GCC,riscv,251, GCC,riscv,252,riscv_v_adjust_bytesize (machine_mode mode) { switch (mode) { case VOIDmode: return 0; case QImode: return 1; case HImode: return 2; case SImode: return 4; case DImode: return 8; case TImode: return 16; case OImode: return 32; case XImode: return 64; case QQmode: return 128; case HHmode: return 256; case SHmode: return 512; case DDmode: return 1024; case TTmode: return 2048; case OOmode: return 4096; case XXmode: return 8192; default: gcc_unreachable (); } } GCC,riscv,253,riscv_v_adjust_nunits (machine_mode mode) { switch (mode) { case V2SFmode: return 2; case V4SFmode: return 4; case V8SFmode: return 8; case V16SFmode: return 16; case V32SFmode: return 32; case V64SFmode: return 64; default: return 1; } } GCC,riscv,254, GCC,riscv,255,static void riscv_v_adjust_scalable_frame(struct riscv_v_frame *frame) { frame->scalable_frame = riscv_v_get_scalable_frame(frame); frame->scalable_frame.prologue_size = riscv_v_get_prologue_size(frame); frame->scalable_frame.epilogue_size = riscv_v_get_epilogue_size(frame); } GCC,riscv,256,"bool riscv_v_ext_vector_mode_p(riscv_mode_t mode) { return (mode == RISCV_MODE_V); } ``` This function takes a `riscv_mode_t` argument, which is an enumeration of the different RISC-V modes. The function returns true if the mode is the RVV enabled mode, which is represented by the `RISCV_MODE_V` enumeration value. You can use this function in your GCC backend to check if a mode is the RVV enabled mode. 
For example: ``` if (riscv_v_ext_vector_mode_p(mode)) { } else { }" GCC,riscv,257,"static void riscv_zero_call_used_regs(rtx_insn *insn, HARD_REG_SET need_zeroed_hardregs) { HARD_REG_SET zeroed_hardregs; CLEAR_HARD_REG_SET(zeroed_hardregs); for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++) { if (TEST_HARD_REG_BIT(need_zeroed_hardregs, i)) { rtx_insn *zero_insn = emit_insn(gen_rtx_SET(VOIDmode, gen_rtx_REG(i), const0_rtx)); SET_HARD_REG_BIT(zeroed_hardregs, i); } } return zeroed_hardregs; }" GCC,riscv,258,"static int riscv_address_cost(rtx x, machine_mode mode, addr_space_t as, bool speed) { int cost = 0; if (as == ADDR_SPACE_GENERIC) { cost = 1; } else if (as == ADDR_SPACE_FLAT) { cost = 2; } else if (as == ADDR_SPACE_SEGMENTED) { cost = 3; } if (mode == QImode) { cost += 1; } else if (mode == HImode) { cost += 2; } else if (mode == SImode) { cost += 3; } else if (mode == DImode) { cost += 4; } if (speed) { cost += 1; } return cost; }" GCC,riscv,259,"int riscv_address_insns(machine_mode mode, rtx x, bool might_split_p) { int num_insns = 0; if (mode == VOIDmode) return 0; if (might_split_p) { if (!riscv_valid_address_p(mode, x)) return 0; } num_insns = 1; return num_insns; }" GCC,riscv,260,"riscv_add_offset (void *reg, int offset, int temp) { if (offset >= 0 && offset < 32) { return reg + offset; } else { temp = riscv_force_temporary (temp); return riscv_add (reg, temp, offset); } }" GCC,riscv,261,"riscv_arg_partial_bytes (tree type, tree size, int *partial_bytes) { if (TREE_CODE (type) == INTEGER_TYPE && TYPE_PRECISION (type) > 64) { *partial_bytes = TYPE_PRECISION (type) - 64; } else if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) > 64) { *partial_bytes = TYPE_PRECISION (type) - 64; } else { *partial_bytes = 0; } }" GCC,riscv,262,"int riscv_build_integer(rtx *codes, int value) { int num_ops = 0; rtx reg = gen_reg_rtx(SImode); emit_insn(gen_movsi(reg, value)); num_ops++; emit_insn(gen_movsi(gen_rtx_MEM(SImode, codes), reg)); num_ops++; return num_ops; }" GCC,riscv,263, GCC,riscv,264,"riscv_cannot_force_const_mem (rtx op, enum machine_mode mode) { if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == SYMBOL_REF) { if (SYMBOL_REF_CONSTANT_P (XEXP (op, 0))) { if (riscv_valid_const_mem_ref_p (op, mode)) return false; } } return true; } bool riscv_cannot_force_const_mem (rtx op, enum machine_mode mode) { return riscv_cannot_force_const_mem (op, mode); } ``` This function checks if the operand is a constant memory reference, and if so, it checks if the symbol is a constant and if the memory reference is a valid constant memory reference. If the memory reference is not a valid constant memory reference, the function returns `true`, indicating that the operand cannot be forced to be a constant memory reference. You can then use this function in your GCC backend to check if an operand can be forced to be a constant memory reference. 
For example: ``` if (riscv_cannot_force_const_mem (operand, mode)) { error (""operand %d cannot be forced to be a constant memory reference"", operand); return false; }" GCC,riscv,265,"bool riscv_canonicalize_int_order_test(riscv_code code, riscv_operand cmp1, riscv_mode mode) { if (!riscv_code_valid_for_mode(code, mode)) return false; if (!riscv_operand_valid_for_int_order_test(cmp1, mode)) return false; if (!riscv_can_combine_code_and_operand_for_int_order_test(code, cmp1, mode)) return false; riscv_code_update_for_int_order_test(code, cmp1, mode); return true; }" GCC,riscv,266,riscv_can_use_return_insn (rtx_insn *insn) { if (insn->epilogue == NULL) return true; if (insn->frame_pointer != NULL) return false; for (rtx_insn *insn2 = insn->next; insn2 != NULL; insn2 = insn2->next) { if (insn2->code == CODE_FOR_stack_adjust) return false; } return true; } GCC,riscv,267,"bool riscv_classify_address(rtx x, machine_mode mode, struct riscv_address_info *info, bool strict_p) { if (!riscv_valid_address_p(x, mode)) return false; info->mode = mode; info->address = x; info->strict_p = strict_p; return true; }" GCC,riscv,268,riscv_class_max_nregs (void) { return riscv_target_info.max_nregs; } GCC,riscv,269,riscv_conditional_register_usage (void) { if (TARGET_RISCV) { riscv_register_usage = riscv_register_usage_default; } else { riscv_register_usage = default_register_usage; } } GCC,riscv,270,"riscv_elf_select_rtx_section (rtx x, int reloc) { if (GET_CODE (x) == SYMBOL_REF) { const char *name = XSTR (x, 0); if (strncmp (name, "".rodata.cst"", 10) == 0) return "".rodata.cst""; else if (strncmp (name, "".s%s"", 3) == 0) return "".s%s""; } return NULL; }" GCC,riscv,271,"riscv_emit_float_compare (rtx *operands, enum rtx_code code) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; switch (code) { case EQ: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_EQ (VOIDmode, op1, op2))); break; case NE: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_NE (VOIDmode, op1, op2))); break; case GT: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_GT (VOIDmode, op1, op2))); break; case GE: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_GE (VOIDmode, op1, op2))); break; case LT: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LT (VOIDmode, op1, op2))); break; case LE: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LE (VOIDmode, op1, op2))); break; default: gcc_unreachable (); } }" GCC,riscv,272," bool *invert_ptr) { riscv_emit_cmp(cmp0, cmp1, code); riscv_emit_branch(target, code); if (invert_ptr) { riscv_emit_xor(target, target, 1); *invert_ptr = true; } }" GCC,riscv,273,"static void riscv_expand_prologue (void) { tree func = current_function_decl; tree proto = TREE_TYPE (func); int riscv = 0; int zero = 1; int one = 0; int two = 1; int three = 1; int four = 0; int five = 1; int six = 1; expand_prologue (func, proto, riscv, zero, one, two, three, four, five, six); }" GCC,riscv,274,"static inline void riscv_extend_comparands(rtx op0, rtx op1) { if (GET_MODE(op0) == QImode) { op0 = gen_rtx_ZERO_EXTEND(HImode, op0); } else if (GET_MODE(op0) == HImode) { op0 = gen_rtx_SIGN_EXTEND(SImode, op0); } if (GET_MODE(op1) == QImode) { op1 = gen_rtx_ZERO_EXTEND(HImode, op1); } else if (GET_MODE(op1) == HImode) { op1 = gen_rtx_SIGN_EXTEND(SImode, op1); } }" GCC,riscv,275,"void riscv_file_start(void) { fprintf(asm_out_file, ""\t.option %spic\n"", riscv_option_pic); fprintf(asm_out_file, ""\t.option %sno-pic\n"", riscv_option_no_pic); } ``` This function uses the `fprintf` function to write the target specific values to the 
assembly file. The `%spic` and `%sno-pic` format specifiers are used to insert the `pic` and `no-pic` options into the assembly code. You can also use the `riscv_option_pic` and `riscv_option_no_pic` variables to store the target specific values and use them in the `fprintf` function. ``` void riscv_file_start(void) { fprintf(asm_out_file, ""\t.option %spic\n"", riscv_option_pic); fprintf(asm_out_file, ""\t.option %sno-pic\n"", riscv_option_no_pic); }" GCC,riscv,276,static int riscv_first_stack_step(void) { int frame_size = 0; int callee_saved_regs = 0; frame_size = riscv_2_8_2_frame_size(); callee_saved_regs = riscv_2_8_2_callee_saved_regs(); frame_size += callee_saved_regs * 8; return frame_size; } GCC,riscv,277,"riscv_flatten_aggregate_argument (tree type, int regno) { if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE && TREE_CODE (type) != QUAL_UNION_TYPE) return false; if (TYPE_FIELDS (type) == NULL_TREE || TREE_CHAIN (TYPE_FIELDS (type)) == NULL_TREE || TREE_CHAIN (TREE_CHAIN (TYPE_FIELDS (type))) != NULL_TREE) return false; if (TARGET_FLOAT_ARGS && TYPE_MODE (type) == SFmode && regno < FIRST_FP_REGNUM + 2) return true; return false; }" GCC,riscv,278,"static void riscv_force_address(rtx x, enum machine_mode mode) { if (!riscv_valid_address_p(x, mode)) { rtx reg = gen_reg_rtx(mode); emit_move_insn(reg, x); x = reg; } }" GCC,riscv,279,"riscv_force_binary (riscv_reg_t code, riscv_reg_t x, riscv_reg_t y, riscv_mode_t mode) { riscv_reg_t result; result = code * x + y; result = riscv_reg_new (mode, result); return result; }" GCC,riscv,280,"riscv_force_temporary (rtx value, rtx dest) { if (riscv_new_pseudos_allowed_p ()) { rtx reg = gen_reg_rtx (GET_MODE (value)); emit_move_insn (reg, value); return reg; } else { emit_move_insn (dest, value); return dest; } }" GCC,riscv,281,"void riscv_for_each_saved_reg(void (*fn)(int reg, int sp_offset)) { int reg; int sp_offset; for (reg = 0; reg < RISCV_NUM_REGS; reg++) { if (riscv_reg_is_saved(reg)) { sp_offset = riscv_reg_offset(reg); fn(reg, sp_offset); } } }" GCC,riscv,282,static void riscv_function_arg(struct gcc_target_function_arg *arg) { switch (arg->mode) { case TARGET_FUNCTION_ARG_REGISTER: break; case TARGET_FUNCTION_ARG_STACK: break; default: gcc_unreachable(); } } GCC,riscv,283,riscv_function_arg_advance (struct gcc_target_function_arg_advance *advance) { advance->arg_reg = RISCV_ARG_REG; advance->arg_reg_size = RISCV_ARG_REG_SIZE; advance->arg_reg_align = RISCV_ARG_REG_ALIGN; advance->arg_stack_size = RISCV_ARG_STACK_SIZE; advance->arg_stack_align = RISCV_ARG_STACK_ALIGN; } GCC,riscv,284,static unsigned int riscv_function_arg_boundary(tree type) { unsigned int alignment = TYPE_ALIGN(type); if (alignment < PARM_BOUNDARY) return PARM_BOUNDARY; if (alignment > STACK_BOUNDARY) return STACK_BOUNDARY; return alignment; } GCC,riscv,285, GCC,riscv,286,"riscv_function_value (tree type, tree decl, tree value) { if (TREE_CODE (type) == FUNCTION_TYPE) { if (TREE_CODE (value) == CALL_EXPR) { tree call_expr = value; tree return_type = TREE_TYPE (call_expr); if (return_type == NULL_TREE) return_type = void_type_node; tree mode = TYPE_MODE (return_type); if (mode == VOIDmode) mode = TYPE_MODE (type); if (mode == VOIDmode) return; tree valtype = TREE_TYPE (mode); if (valtype == NULL_TREE) valtype = void_type_node; if (valtype != return_type) return; }" GCC,riscv,287,"riscv_hard_regno_mode_ok_p (unsigned int regno, machine_mode mode) { if (regno >= FIRST_PSEUDO_REGISTER) return false; if (mode == VOIDmode || mode == BLKmode) return 
false; if (regno == 0 && mode == QImode) return false; return true; }" GCC,riscv,288,static int riscv_hard_regno_nregs(int regno) { switch (regno) { case 0: return 1; case 1: return 1; case 2: return 1; case 3: return 1; case 4: return 1; case 5: return 1; case 6: return 1; case 7: return 1; case 8: return 1; case 9: return 1; case 10: return 1; case 11: return 1; case 12: return 1; case 13: return 1; case 14: return 1; case 15: return 1; case 16: return 1; case 17: return 1; case 18: return 1; case 19: return 1; case 20: return 1; case 21: return 1; case 22: return 1; case 23: return 1; case 24: return 1; case 25: return 1; case 26: return 1; case 27: return 1; case 28: return 1; case 29: return 1; case 30: return 1; case 31: return 1; default: return 0; } } GCC,riscv,289,"bool riscv_leaf_function_p(void) { static bool memoized = false; static bool result = false; if (!memoized) { result = riscv_target_specific_value(0, 1, 1, 0); memoized = true; } return result; } ``` This function uses the `riscv_target_specific_value` function to retrieve the target-specific value for the current function, and then returns a boolean value indicating whether the current function is a leaf function based on the result of that function. The `memoized` variable is used to keep track of whether the result has already been computed and memoized, and the `result` variable is used to store the memoized result. To use this function, you can simply call it from within your RISC-V code, like this: ``` if (riscv_leaf_function_p()) { }" GCC,riscv,290,"riscv_legitimate_address_p (rtx addr, machine_mode mode, addr_space_t as) { if (REG_P (addr)) return true; if (CONSTANT_P (addr)) { if (riscv_legitimate_constant_p (addr, mode, as)) return true; } if (MEM_P (addr)) { if (riscv_legitimate_memory_reference_p (addr, mode, as)) return true; } return false; }" GCC,riscv,291,"riscv_legitimate_constant_p (rtx x) { if (GET_CODE (x) == CONST_INT) return true; if (GET_CODE (x) == CONST_DOUBLE) return true; if (GET_CODE (x) == CONST_VECTOR) return true; return false; } ``` This function checks if the given RTX `x` is a legitimate constant for the RISC-V backend of GCC. It returns `true` if `x` is a `CONST_INT`, `CONST_DOUBLE`, or `CONST_VECTOR`, and `false` otherwise. You can use this function in your GCC plugin by calling it with the appropriate RTX value. 
For example: ```c rtx x = ...; if (riscv_legitimate_constant_p (x)) { } else { }" GCC,riscv,292,"riscv_legitimize_address (rtx x, machine_mode mode, int strict_p) { if (riscv_legitimate_address_p (x, mode, strict_p)) return x; if (GET_CODE (x) == PLUS) { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); if (REG_P (op0) && REG_P (op1)) { if (GET_MODE (op0) == GET_MODE (op1)) { if (GET_MODE (x) == SImode) { rtx new_x = gen_rtx_PLUS (SImode, op0, op1); return new_x; }" GCC,riscv,293,"riscv_legitimize_const_move (rtx dest, rtx src) { if (CONSTANT_P (src) && !CONST_INT_P (src)) { rtx reg = gen_reg_rtx (GET_MODE (dest)); emit_move_insn (reg, src); src = reg; } emit_move_insn (dest, src); }" GCC,riscv,294,"riscv_legitimize_move (rtx dest, rtx src) { if (GET_CODE (dest) == REG && GET_CODE (src) == REG) { emit_insn (gen_rtx_SET (VOIDmode, dest, src)); } else if (GET_CODE (dest) == MEM && GET_CODE (src) == REG) { emit_insn (gen_rtx_SET (VOIDmode, dest, src)); } else if (GET_CODE (dest) == REG && GET_CODE (src) == MEM) { emit_insn (gen_rtx_SET (VOIDmode, dest, src)); } else if (GET_CODE (dest) == MEM && GET_CODE (src) == MEM) { rtx temp = gen_reg_rtx (GET_MODE (dest)); emit_insn (gen_rtx_SET (VOIDmode, temp, src)); emit_insn (gen_rtx_SET (VOIDmode, dest, temp)); } else { error (""invalid move instruction""); } }" GCC,riscv,295,"int riscv_load_store_insns(rtx_insn *insn, rtx_insn *mem) { int num_insns = 0; if (GET_CODE(insn) == MEM) { if (GET_CODE(mem) == MEM) { num_insns = 2; } else { num_insns = 1; } } else { num_insns = 0; } return num_insns; }" GCC,riscv,296," enum reg_class rclass, int outer_code) { int cost = 0; switch (outer_code) { case CODE_FOR_memory_move: cost = 1; break; case CODE_FOR_memory_move_double: cost = 2; break; case CODE_FOR_memory_move_quad: cost = 4; break; default: break; } if (mode == QImode) { cost *= 2; } else if (mode == HImode) { cost *= 4; } else if (mode == SImode) { cost *= 8; } else if (mode == DImode) { cost *= 16; } if (rclass == GENERAL_REGS) { cost *= 2; } else if (rclass == FP_REGS) { cost *= 4; } return cost; }" GCC,riscv,297, GCC,riscv,298,"riscv_option_override (void) { if (TARGET_RISCV) { if (TARGET_DIV) { if (!TARGET_ARCH) error (""-mdiv requires -march to subsume the % extension""); } if (TARGET_ABI_RV64) { if (!TARGET_ARCH) error (""requested ABI requires -march to subsume the %qc extension""); } if (TARGET_ABI_RV32) { if (!TARGET_ARCH) error (""ABI requires -march=rv%d"", TARGET_ABI_RV32); } } }" GCC,riscv,299, GCC,riscv,300,"riscv_output_move (rtx dest, rtx src) { enum machine_mode mode = GET_MODE (dest); enum riscv_insn_code icode; if (mode == QImode) icode = CODE_FOR_lbu; else if (mode == HImode) icode = CODE_FOR_lhu; else if (mode == SImode) icode = CODE_FOR_lw; else if (mode == DImode) icode = CODE_FOR_ld; else if (mode == SFmode) icode = CODE_FOR_fcvt_d_w; else if (mode == DFmode) icode = CODE_FOR_fcvt_d_w; else gcc_unreachable (); emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_fmt_ee (icode, mode, src, dest))); }" GCC,riscv,301, GCC,riscv,302, GCC,riscv,303,"bool riscv_pass_aggregate_in_fpr_pair_p (tree type, int *fields) { if (TREE_CODE (type) != RECORD_TYPE) return false; if (TYPE_TARGET_SPECIFIC (type) != ""riscv,2,0,0,0,0"") return false; for (int i = 0; i < TYPE_FIELDS (type); i++) fields[i] = TYPE_FIELD_ID (type, i); return true; }" GCC,riscv,304," const_tree type, bool named_arg) { if (AGGREGATE_TYPE_P (type) || (GET_MODE_SIZE (mode) > 8 && GET_MODE_CLASS (mode) == MODE_INT)) return true; return false; }" GCC,riscv,305," enum machine_mode 
mode2, int offset2) { if (regno == regno1 && regno == regno2) return; if (regno == regno2 && regno == regno1) return; if (regno != regno1 && regno != regno2) { riscv_pass_fpr(regno1, mode1, offset1); riscv_pass_fpr(regno2, mode2, offset2); } }" GCC,riscv,306,"riscv_pass_fpr_single (tree type, tree value, int regno) { if (TYPE_MODE (type) == SFmode && TYPE_MODE (value) == BLKmode) { tree field = TYPE_FIELDS (type); tree field_type = TREE_TYPE (field); tree field_value = build_simple_mem_ref (field); riscv_pass_by_reference (field_type, field_value, regno); } else { riscv_pass_by_value (type, value, regno); } }" GCC,riscv,307,int riscv_pass_mode_in_fpr_p (machine_mode mode) { if (mode == SFmode || mode == DFmode || mode == TFmode) return 1; else if (mode == SCmode || mode == DCmode || mode == TCmode) return 2; else return 0; } GCC,riscv,308,"void riscv_print_operand(FILE *stream, rtx op, int code) { switch (code) { case 'h': if (GET_CODE(op) == HIGH) op = XEXP(op, 0); fprintf(stream, ""h%d"", REG_P(op) ? REGNO(op) : -1); break; case 'R': if (GET_CODE(op) == LOW) op = XEXP(op, 0); fprintf(stream, ""r%d"", REG_P(op) ? REGNO(op) : -1); break; case 'C': if (GET_CODE(op) == COMPARE) { fprintf(stream, ""%%%s"", riscv_branch_condition_name(XEXP(op, 0))); } else { fprintf(stream, ""invalid use of '%%%c'"", code); } break; case 'A': if (GET_CODE(op) == MEMORY) { fprintf(stream, ""%s"", riscv_atomic_operation_suffix(XEXP(op, 0))); } else { fprintf(stream, ""invalid use of '%%%c'"", code); }" GCC,riscv,309," enum riscv_reloc_type hi_reloc) { const char *hi_reloc_str = NULL; const char *lo_reloc_str = NULL; switch (hi_reloc) { case RISCV_RELOC_HI: hi_reloc_str = ""%hi""; break; case RISCV_RELOC_LO: hi_reloc_str = ""%lo""; break; case RISCV_RELOC_PCREL_HI: hi_reloc_str = ""%pcrel_hi""; break; case RISCV_RELOC_PCREL_LO: hi_reloc_str = ""%pcrel_lo""; break; case RISCV_RELOC_TPREL_HI: hi_reloc_str = ""%tprel_hi""; break; case RISCV_RELOC_TPREL_LO: hi_reloc_str = ""%tprel_lo""; break; case RISCV_RELOC_S: hi_reloc_str = ""%s""; break; default: break; } if (hi_reloc_str) { fprintf (stderr, ""%s%s%s"", hi_reloc_str, context, op); } else { fprintf (stderr, ""%s"", op); } }" GCC,riscv,310,"static int riscv_register_move_cost(int class, int mode, int reg1, int reg2) { int cost = 0; if (class == reg1 && class == reg2) { cost = 0; } else { cost = 1; } return cost; }" GCC,riscv,311," bool strict_p) { if (regno >= FIRST_PSEUDO_REGISTER) return false; if (strict_p) { if (!TEST_HARD_REG_BIT (reg_class_contents[BASE_REGS], regno)) return false; } else { if (regno >= FIRST_FIXED_REGISTER || regno >= FIRST_PSEUDO_REGISTER) return false; } if (GET_MODE_CLASS (mode) == MODE_INT && regno >= FIRST_PSEUDO_REGISTER) { if (!TEST_HARD_REG_BIT (reg_class_contents[BASE_REGS], regno)) return false; }" GCC,riscv,312,"void riscv_restore_reg(uint32_t reg, uint32_t mem) { asm volatile(""lw %0, %1"" : ""=r""(reg) : ""m""(mem)); }" GCC,riscv,313,"riscv_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED) { return true; }" GCC,riscv,314, GCC,riscv,315,"void riscv_save_restore_reg(int regno, int mode, int offset) { if (mode == RISCV_MODE_SAVE) { riscv_push(regno); } else if (mode == RISCV_MODE_RESTORE) { riscv_pop(regno); } riscv_addi(RISCV_REG_SP, RISCV_REG_SP, offset); }" GCC,riscv,316," int n_unnamed_args) { int i; tree parm; cum->n_named_args = *n_named_args; cum->n_unnamed_args = n_unnamed_args; for (i = 0; i < cum->n_named_args; i++) { parm = TREE_VALUE (TREE_VEC_ELT (fntype, i)); if (TREE_CODE (parm) == PARM_DECL) { 
cum->named_arg_types[i] = TREE_TYPE (parm); cum->named_arg_names[i] = DECL_NAME (parm); } else { cum->named_arg_types[i] = NULL_TREE; cum->named_arg_names[i] = NULL_TREE; } } for (i = 0; i < cum->n_unnamed_args; i++) { parm = TREE_VALUE (TREE_VEC_ELT (fntype, i + cum->n_named_args)); if (TREE_CODE (parm) == PARM_DECL) { cum->unnamed_arg_types[i] = TREE_TYPE (parm); }" GCC,riscv,317,"void riscv_split_integer(uint64_t x, uint32_t *lo, uint32_t *hi) { *lo = (uint32_t)x; *hi = (uint32_t)(x >> 32); }" GCC,riscv,318,"riscv_split_symbol (machine_mode mode, rtx addr, rtx low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if (GET_CODE (addr) == SYMBOL_REF) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out != NULL_RTX) *low_out = low; return true; } else { return false; } } else { if (GET_CODE (addr) == MEM) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out != NULL_RTX) *low_out = low; return true; } else { return false; } } }" GCC,riscv,319,"uint32_t riscv_subword(uint64_t OP, bool HIGH_P) { if (HIGH_P) { return (uint32_t)(OP >> 32); } else { return (uint32_t)OP; } }" GCC,riscv,320,"riscv_unspec_offset_high (rtx base, rtx symbol_type, rtx temp) { if (GET_CODE (symbol_type) == CONST_INT && INTVAL (symbol_type) == 32) { rtx high_part = gen_rtx_PLUS (Pmode, base, gen_rtx_CONST_INT (Pmode, INTVAL (symbol_type))); return high_part; } else return base; }" GCC,riscv,321,"riscv_use_save_libcall (rtx_insn *insn) { if (GET_CODE (insn) == CALL_INSN && GET_CODE (XEXP (XEXP (insn, 0), 0)) == SYMBOL_REF && XSTR (XEXP (XEXP (insn, 0), 0), 0) == ""__riscv_save_gprs"" && GET_CODE (XEXP (XEXP (insn, 0), 1)) == SYMBOL_REF && XSTR (XEXP (XEXP (insn, 0), 1), 0) == ""__riscv_restore_gprs"") return true; return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the RISC-V assembly code. It checks if the instruction is a `CALL_INSN` and if the first operand is a `SYMBOL_REF` with the name `__riscv_save_gprs` and the second operand is also a `SYMBOL_REF` with the name `__riscv_restore_gprs`. If both conditions are true, it returns `true`, otherwise it returns `false`. You can then use this function in your RISC-V backend code to determine whether to call the GPR save/restore routines. For example: ``` if (riscv_use_save_libcall (insn)) { ... 
}" GCC,riscv,322,"riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p) { if (GET_CODE (x) != REG) return false; if (strict_p) { if (!REG_OK_STRICT (x, mode)) return false; } else { if (!REG_OK (x, mode)) return false; }" GCC,riscv,323, GCC,riscv,324,"bool riscv_valid_offset_p(rtx x, machine_mode mode) { if (GET_CODE(x) == CONST_INT) { HOST_WIDE_INT offset = INTVAL(x); if (offset >= 0 && offset < 16) return true; } if (GET_CODE(x) == REG) { int regno = REGNO(x); if (regno >= 0 && regno < 32) return true; } return false; }" GCC,riscv,325,"int riscv_address_insns(machine_mode mode, rtx x, bool might_split_p) { int num_insns = 0; if (mode == VOIDmode) return 0; if (might_split_p) { if (!riscv_valid_address_p(mode, x)) return 0; } num_insns = 1; return num_insns; }" GCC,riscv,326,"riscv_arg_partial_bytes (tree type, tree size, int *partial_bytes) { if (TREE_CODE (type) == INTEGER_TYPE && TYPE_PRECISION (type) > 64) { *partial_bytes = TYPE_PRECISION (type) - 64; } else if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) > 64) { *partial_bytes = TYPE_PRECISION (type) - 64; } else { *partial_bytes = 0; } }" GCC,riscv,327,"void riscv_block_move_straight(uint8_t *dest, uint8_t *src, uint32_t length) { uint32_t i; for (i = 0; i < length; i++) { dest[i] = src[i]; } }" GCC,riscv,328,"bool riscv_classify_address(rtx x, machine_mode mode, struct riscv_address_info *info, bool strict_p) { if (!riscv_valid_address_p(x, mode)) return false; info->mode = mode; info->address = x; info->strict_p = strict_p; return true; }" GCC,riscv,329,"riscv_conditional_register_usage (void) { if (TARGET_RISCV) { return true; } else { return false; } } ``` This function checks if the target is RISC-V using the `TARGET_RISCV` macro, and if it is, it returns `true` to indicate that the RISC-V register usage should be used. Otherwise, it returns `false` to indicate that the default register usage should be used. You can also use the `TARGET_RISCV_1` macro to check if the target is RISC-V version 1, and the `TARGET_RISCV_2` macro to check if the target is RISC-V version 2. 
```c #include ""config.h"" #include ""system.h"" #include ""rtl.h"" #include ""tree.h"" #include ""insn-config.h"" static bool riscv_conditional_register_usage (void) { if (TARGET_RISCV_1) { return true; } else if (TARGET_RISCV_2) { return true; }" GCC,riscv,330,static unsigned int riscv_constant_alignment(unsigned int value) { switch (value) { case 0: return 0; case 1: return 1; case 2: return 2; case 4: return 4; case 8: return 8; case 16: return 16; case 32: return 32; case 64: return 64; case 128: return 128; case 256: return 256; case 512: return 512; case 1024: return 1024; case 2048: return 2048; case 4096: return 4096; case 8192: return 8192; case 16384: return 16384; case 32768: return 32768; case 65536: return 65536; default: return 0; } } GCC,riscv,331,"void riscv_expand_prologue(void) { riscv_prologue_pattern(riscv, 0, 0, 1, 0, 1, 1, 0, 1); }" GCC,riscv,332,"void riscv_file_start(void) { fprintf(asm_out_file, ""\t.option %spic\n"", riscv_pic_option); fprintf(asm_out_file, ""\t.option norelax\n""); }" GCC,riscv,333,"void riscv_for_each_saved_reg(void (*fn)(int reg, int sp_offset)) { int reg; int sp_offset; for (reg = 0; reg < RISCV_NUM_REGS; reg++) { if (riscv_reg_saved(reg)) { sp_offset = riscv_reg_sp_offset(reg); fn(reg, sp_offset); } } }" GCC,riscv,334,static void riscv_function_arg(struct gcc_target_function_arg *arg) { switch (arg->mode) { case TARGET_FUNCTION_ARG_REGISTER: break; case TARGET_FUNCTION_ARG_STACK: break; default: gcc_unreachable(); } } GCC,riscv,335,riscv_function_arg_advance (struct gcc_target_function_arg_advance *advance) { advance->arg_reg = RISCV_ARG_REG; advance->arg_reg_size = RISCV_ARG_REG_SIZE; advance->arg_reg_align = RISCV_ARG_REG_ALIGN; advance->arg_stack_size = RISCV_ARG_STACK_SIZE; advance->arg_stack_align = RISCV_ARG_STACK_ALIGN; } GCC,riscv,336, GCC,riscv,337,"bool riscv_hard_regno_mode_ok(unsigned int regno, machine_mode mode) { if (regno >= RISCV_NUM_REGS) return false; if (regno < RISCV_NUM_HARD_REGS) return true; if (regno >= RISCV_NUM_HARD_REGS && regno < RISCV_NUM_REGS) return false; return false; }" GCC,riscv,338,"riscv_legitimize_address (rtx x, machine_mode mode, int strict_p) { if (riscv_legitimate_address_p (x, mode, strict_p)) return x; if (GET_CODE (x) == PLUS) { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); if (REG_P (op0) && REG_P (op1)) { if (GET_MODE (op0) == GET_MODE (op1)) { if (GET_MODE (x) == SImode) { rtx new_x = gen_rtx_PLUS (SImode, op0, op1); return new_x; }" GCC,riscv,339,"riscv_legitimize_const_move (rtx dest, rtx src) { if (CONSTANT_P (src) && !CONST_INT_P (src)) { rtx reg = gen_reg_rtx (GET_MODE (dest)); emit_move_insn (reg, src); src = reg; } emit_move_insn (dest, src); }" GCC,riscv,340,"riscv_legitimize_move (rtx dest, rtx src) { if (GET_CODE (dest) == REG && GET_CODE (src) == REG) { emit_insn (gen_movsi (dest, src)); return; } if (GET_CODE (dest) == MEM && GET_CODE (src) == MEM) { rtx temp = gen_reg_rtx (SImode); emit_insn (gen_movsi (temp, src)); emit_insn (gen_movsi (dest, temp)); return; } if (GET_CODE (dest) == REG && GET_CODE (src) == MEM) { emit_insn (gen_movsi (dest, src)); return; } if (GET_CODE (dest) == MEM && GET_CODE (src) == REG) { emit_insn (gen_movsi (dest, src)); return; } rtx temp = gen_reg_rtx (SImode); emit_insn (gen_movsi (temp, src)); emit_insn (gen_movsi (dest, temp)); }" GCC,riscv,341, GCC,riscv,342, GCC,riscv,343,"riscv_output_move (rtx dest, rtx src) { enum machine_mode mode = GET_MODE (dest); enum riscv_insn_code icode; if (mode == QImode) icode = CODE_FOR_lbu; else if (mode == HImode) 
icode = CODE_FOR_lhu; else if (mode == SImode) icode = CODE_FOR_lw; else if (mode == DImode) icode = CODE_FOR_ld; else if (mode == SFmode) icode = CODE_FOR_fcvt_d_w; else if (mode == DFmode) icode = CODE_FOR_fcvt_d_w; else gcc_unreachable (); emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_fmt_ee (icode, mode, src, dest))); }" GCC,riscv,344," const_tree type, bool named_arg) { if (AGGREGATE_TYPE_P (type) || (GET_MODE_SIZE (mode) > 8 && GET_MODE_CLASS (mode) == MODE_INT)) return true; return false; }" GCC,riscv,345,"riscv_pass_fpr_single (tree type, tree value, int regno) { if (TYPE_MODE (type) == SFmode && TYPE_MODE (value) == BLKmode) { tree field = TYPE_FIELDS (type); tree field_type = TREE_TYPE (field); tree field_value = build_simple_mem_ref (field); riscv_pass_by_reference (field_type, field_value, regno); } else { riscv_pass_by_value (type, value, regno); } }" GCC,riscv,346," int n_unnamed_args) { int i; tree parm; cum->n_named_args = *n_named_args; cum->n_unnamed_args = n_unnamed_args; for (i = 0; i < cum->n_named_args; i++) { parm = TREE_VALUE (TREE_VEC_ELT (fntype, i)); if (TREE_CODE (parm) == PARM_DECL) { cum->named_arg_types[i] = TREE_TYPE (parm); cum->named_arg_names[i] = DECL_NAME (parm); } else { cum->named_arg_types[i] = NULL_TREE; cum->named_arg_names[i] = NULL_TREE; } } for (i = 0; i < cum->n_unnamed_args; i++) { parm = TREE_VALUE (TREE_VEC_ELT (fntype, i + cum->n_named_args)); if (TREE_CODE (parm) == PARM_DECL) { cum->unnamed_arg_types[i] = TREE_TYPE (parm); }" GCC,riscv,347,"void riscv_set_current_function(struct function *func) { if (func->attrs.riscv_target_specific_value != RISCV_TARGET_SPECIFIC_VALUE_NONE) { if (func->attrs.riscv_target_specific_value != RISCV_TARGET_SPECIFIC_VALUE_VALID) { error(""Invalid target specific value for function %s"", func->name); } } current_function = func; }" GCC,riscv,348,"void riscv_split_integer(uint64_t x, uint32_t *lo, uint32_t *hi) { *lo = (uint32_t)x; *hi = (uint32_t)(x >> 32); }" GCC,riscv,349,"riscv_split_symbol (machine_mode mode, rtx addr, rtx low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if (GET_CODE (addr) == SYMBOL_REF) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out != NULL_RTX) *low_out = low; return true; } else { return false; } } else { if (GET_CODE (addr) == MEM) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out != NULL_RTX) *low_out = low; return true; } else { return false; } } }" GCC,riscv,350, GCC,riscv,351,"int riscv_address_insns(machine_mode mode, rtx x, bool might_split_p) { int num_insns = 0; if (mode == VOIDmode) return 0; if (might_split_p) { for (int i = 0; i < GET_MODE_SIZE(mode); i += 4) { if (!riscv_address_valid_p(x, i, mode)) return 0; } } else { if (!riscv_address_valid_p(x, 0, mode)) return 0; } num_insns = riscv_address_insns_1(mode, x, might_split_p); return num_insns; }" GCC,riscv,352,riscv_epilogue_uses (rtx_insn *insn) { if (GET_CODE (insn) == CALL_INSN) return true; if (GET_CODE (insn) == JUMP_INSN) return true; if (GET_CODE (insn) == RETURN_INSN) return true; return false; } GCC,riscv,353, GCC,riscv,354,"riscv_split_symbol (rtx addr, machine_mode mode, rtx *low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if (GET_CODE (addr) == SYMBOL_REF) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out) *low_out = low; return true; } else { return false; } } else { if (GET_CODE (addr) == MEM) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out) 
*low_out = low; return true; } else { return false; } } }" GCC,riscv,355,"int riscv_address_cost(rtx x, machine_mode mode, addr_space_t as, bool speed) { if (as == ADDR_SPACE_GENERIC) { return 1; } else if (as == ADDR_SPACE_FLAT) { return 2; } else if (as == ADDR_SPACE_PIC) { return 3; } else { return 0; } }" GCC,riscv,356,"int riscv_address_insns(machine_mode mode, rtx x, bool might_split_p) { int num_insns = 0; if (mode == VOIDmode) return 0; if (might_split_p) { for (int i = 0; i < GET_MODE_SIZE(mode); i += 4) { if (!riscv_address_valid_p(x, i, mode)) return 0; } } else { if (!riscv_address_valid_p(x, 0, mode)) return 0; } num_insns = riscv_address_insns_1(mode, x, might_split_p); return num_insns; }" GCC,riscv,357,"riscv_add_offset (void *reg, int offset, int temp) { if (offset >= 0 && offset < 32) { return reg + offset; } else { temp = riscv_force_temporary (temp); return riscv_add (reg, temp, offset); } }" GCC,riscv,358,"void riscv_adjust_block_mem(rtx mem, rtx length, rtx *loop_reg, rtx *loop_mem) { rtx base_reg = gen_rtx_REG(SImode, 0); rtx loop_mem = gen_rtx_MEM(SImode, base_reg); *loop_reg = base_reg; *loop_mem = loop_mem; }" GCC,riscv,359,riscv_allocate_stack_slots_for_args (struct gcc_target_args *args) { for (int i = 0; i < args->num_args; i++) { struct gcc_target_arg *arg = &args->args[i]; if (arg->type == RISCV_ARG_TYPE_REGISTER) { arg->stack_slot = riscv_allocate_stack_slot (args->stack_slot_size); } } } GCC,riscv,360,"riscv_arg_partial_bytes (tree type, tree size, int *partial_bytes) { if (TREE_CODE (type) == INTEGER_TYPE && TYPE_PRECISION (type) > 64) { *partial_bytes = TYPE_PRECISION (type) - 64; } else if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) > 64) { *partial_bytes = TYPE_PRECISION (type) - 64; } else { *partial_bytes = 0; } }" GCC,riscv,361,riscv_asan_shadow_offset (unsigned long addr) { return (addr >> 29) & 0x7; } GCC,riscv,362,"int riscv_binary_cost(rtx x, int single_insns, int double_insns) { int cost = 0; if (GET_MODE_SIZE(GET_MODE(x)) <= UNITS_PER_WORD) cost = single_insns; else cost = double_insns; if (GET_MODE_SIZE(GET_MODE(x)) > UNITS_PER_WORD) cost *= GET_MODE_SIZE(GET_MODE(x)) / UNITS_PER_WORD; return cost; }" GCC,riscv,363,"void riscv_block_move_loop(void *dest, const void *src, uint32_t length, uint32_t bytes_per_iter) { uint32_t i; for (i = 0; i < length; i += bytes_per_iter) { uint32_t *dest_ptr = (uint32_t *)dest + i; uint32_t *src_ptr = (uint32_t *)src + i; *dest_ptr = *src_ptr; } }" GCC,riscv,364,"void riscv_block_move_straight(uint8_t *dest, uint8_t *src, uint32_t length) { uint32_t i; for (i = 0; i < length; i++) { dest[i] = src[i]; } }" GCC,riscv,365,"int riscv_build_integer(rtx *codes, int value) { int num_ops = 0; rtx reg = gen_reg_rtx(SImode); emit_insn(gen_movsi(reg, value)); num_ops++; emit_insn(gen_movsi(gen_rtx_MEM(SImode, codes), reg)); num_ops++; return num_ops; }" GCC,riscv,366,"void riscv_call_tls_get_addr(rtx sym, rtx type, rtx result) { if (sym->type != RISCV_TLS_GET_ADDR_SYM) { error(""Invalid TLS symbol""); } if (type != RISCV_TLS_GET_ADDR_TYPE_GLOBAL_DYNAMIC && type != RISCV_TLS_GET_ADDR_TYPE_LOCAL_DYNAMIC) { error(""Invalid TLS type""); } rtx tls_get_addr_insn = gen_rtx_INSN(RISCV_TLS_GET_ADDR, result, sym, type); emit_insn(tls_get_addr_insn); }" GCC,riscv,367,riscv_cannot_copy_insn_p (rtx_insn *insn) { if (GET_CODE (insn) != INSN || GET_MODE (insn) != VOIDmode || GET_CODE (PATTERN (insn)) != SET || GET_CODE (SET_SRC (PATTERN (insn))) != REG || GET_CODE (SET_DEST (PATTERN (insn))) != REG) return false; if 
(GET_CODE (SET_SRC (PATTERN (insn))) == REG && GET_CODE (SET_DEST (PATTERN (insn))) == REG && REGNO (SET_SRC (PATTERN (insn))) == REGNO (SET_DEST (PATTERN (insn)))) return true; return false; } GCC,riscv,368,"riscv_cannot_force_const_mem (rtx op, enum machine_mode mode) { if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == SYMBOL_REF) { if (SYMBOL_REF_CONSTANT_P (XEXP (op, 0))) { if (riscv_valid_const_mem_ref_p (op, mode)) return false; } } return true; } bool riscv_cannot_force_const_mem (rtx op, enum machine_mode mode) { return riscv_cannot_force_const_mem (op, mode); } ``` This function checks if the operand is a constant memory reference, and if so, it checks if the symbol is a constant and if the memory reference is a valid constant memory reference. If the memory reference is not a valid constant memory reference, the function returns `true`, indicating that the operand cannot be forced to be a constant memory reference. You can then use this function in your GCC backend to check if an operand can be forced to be a constant memory reference. For example: ``` if (riscv_cannot_force_const_mem (operand, mode)) { error (""operand %d cannot be forced to be a constant memory reference"", operand); return false; }" GCC,riscv,369,"bool riscv_canonicalize_int_order_test(riscv_code code, riscv_operand cmp1, riscv_mode mode) { if (!riscv_code_valid_for_mode(code, mode)) return false; if (!riscv_operand_valid_for_int_order_test(cmp1, mode)) return false; if (!riscv_can_combine_code_and_operand_for_int_order_test(code, cmp1, mode)) return false; riscv_code_update_for_int_order_test(code, cmp1, mode); return true; }" GCC,riscv,370, GCC,riscv,371, GCC,riscv,372,riscv_can_use_return_insn (rtx_insn *insn) { if (insn->epilogue == NULL) return true; if (insn->frame_pointer != NULL) return false; for (rtx_insn *insn2 = insn->next; insn2 != NULL; insn2 = insn2->next) { if (insn2->code == CODE_FOR_stack_adjust) return false; } return true; } GCC,riscv,373,"bool riscv_classify_address(rtx x, machine_mode mode, struct riscv_address_info *info, bool strict_p) { if (!riscv_valid_address_p(x, mode)) return false; info->mode = mode; info->address = x; info->strict_p = strict_p; return true; }" GCC,riscv,374,"enum riscv_symbol_classification { RISCV_SYMBOL_CLASS_NORMAL, RISCV_SYMBOL_CLASS_GOT, RISCV_SYMBOL_CLASS_PLT, RISCV_SYMBOL_CLASS_TLS, }; static enum riscv_symbol_classification riscv_classify_symbol (rtx x) { if (GET_CODE (x) == SYMBOL_REF) { if (SYMBOL_REF_TLS_MODEL (x) != TLS_MODEL_NONE) return RISCV_SYMBOL_CLASS_TLS; else if (SYMBOL_REF_GOT_OFFSET (x) != 0) return RISCV_SYMBOL_CLASS_GOT; else if (SYMBOL_REF_PLT_OFFSET (x) != 0) return RISCV_SYMBOL_CLASS_PLT; else return RISCV_SYMBOL_CLASS_NORMAL; } else if (GET_CODE (x) == LABEL_REF) { if (LABEL_REF_TLS_MODEL (x) != TLS_MODEL_NONE) return RISCV_SYMBOL_CLASS_TLS; else return RISCV_SYMBOL_CLASS_NORMAL; } else gcc_unreachable (); }" GCC,riscv,375,"enum riscv_symbolic_expression_type { RISCV_SYMBOLIC_EXPRESSION_TYPE_NONE, RISCV_SYMBOLIC_EXPRESSION_TYPE_REGISTER, RISCV_SYMBOLIC_EXPRESSION_TYPE_MEMORY, RISCV_SYMBOLIC_EXPRESSION_TYPE_IMMEDIATE, RISCV_SYMBOLIC_EXPRESSION_TYPE_LABEL, RISCV_SYMBOLIC_EXPRESSION_TYPE_SYMBOL, RISCV_SYMBOLIC_EXPRESSION_TYPE_CONSTANT, RISCV_SYMBOLIC_EXPRESSION_TYPE_UNKNOWN, }; enum riscv_symbolic_expression_type riscv_classify_symbolic_expression(const char *x) { if (x == NULL) { return RISCV_SYMBOLIC_EXPRESSION_TYPE_NONE; }" GCC,riscv,376,riscv_class_max_nregs (void) { return riscv_target_info.max_nregs; } 
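The records just above and the riscv_compressed_lw_* checks that follow all reduce to classifying an operand and range-checking a register number or offset. As a minimal self-contained sketch of the compressed-load case — assuming only the standard RVC encoding facts (C.LW carries a word-aligned offset of at most 124 via its uimm[6:2] field and can only address registers x8–x15), with hypothetical helper names rather than GCC's internal API: ```c #include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Word-aligned offset of at most 124: the uimm[6:2] field of C.LW. */
bool clw_offset_ok(int32_t offset)
{
  return offset >= 0 && offset <= 124 && (offset & 3) == 0;
}

/* Only x8..x15 are reachable through the 3-bit register fields of C.LW. */
bool clw_reg_ok(unsigned regno)
{
  return regno >= 8 && regno <= 15;
}

int main(void)
{
  printf("%d %d\n",
         clw_offset_ok(124) && clw_reg_ok(10),   /* 1: encodable as c.lw      */
         clw_offset_ok(128) || clw_reg_ok(31));  /* 0: must use the plain lw  */
  return 0;
}
``` Any operand that fails either check has to fall back to the uncompressed lw form.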
GCC,riscv,377,"riscv_compressed_lw_address_p (rtx x) { if (GET_CODE (x) != MEM) return false; if (GET_MODE (x) != QImode && GET_MODE (x) != HImode) return false; if (GET_CODE (XEXP (x, 0)) != REG) return false; if (REGNO (XEXP (x, 0)) != RISCV_ZERO_REGNUM) return false; return true; }" GCC,riscv,378,riscv_compressed_lw_offset_p (unsigned int x) { return (x & 0x1f) == 0 && (x >> 5) < 0x20; } GCC,riscv,379,"bool riscv_compressed_reg_p (unsigned int regno) { return (regno >= 8 && regno <= 15); } ``` This function takes an `unsigned int` argument named `regno` and returns a `bool` value indicating whether the register number is in the range of 8 to 15, which is the range of registers that can be used in compressed instructions for the RISC-V backend of the GCC compiler. You can use this function in your GCC backend code to determine whether a given register number can be used in compressed instructions. For example: ``` if (riscv_compressed_reg_p (regno)) { } else { }" GCC,riscv,380,"static void riscv_conditional_register_usage(void) { riscv_target_conditional_register_usage(16, 31, 1, 16, 31, 1, 1, 0, 1); } void riscv_init_target(void) { riscv_init_target_info(); riscv_conditional_register_usage(); }" GCC,riscv,381,static unsigned int riscv_constant_alignment(unsigned int value) { switch (value) { case 0: return 0; case 1: return 1; case 2: return 2; case 4: return 4; case 8: return 8; case 16: return 16; case 32: return 32; case 64: return 64; case 128: return 128; case 256: return 256; case 512: return 512; case 1024: return 1024; case 2048: return 2048; case 4096: return 4096; case 8192: return 8192; case 16384: return 16384; case 32768: return 32768; case 65536: return 65536; default: return 0; } } GCC,riscv,382,int riscv_const_insns(int X) { if (X < 0 || X > 65535) return 0; int num_insns = 0; if (X <= 15) num_insns = 1; else if (X <= 31) num_insns = 2; else if (X <= 63) num_insns = 3; else if (X <= 127) num_insns = 4; else if (X <= 255) num_insns = 5; else if (X <= 511) num_insns = 6; else if (X <= 1023) num_insns = 7; else if (X <= 2047) num_insns = 8; else if (X <= 4095) num_insns = 9; else if (X <= 8191) num_insns = 10; else if (X <= 16383) num_insns = 11; else if (X <= 32767) num_insns = 12; else if (X <= 65535) num_insns = 13; return num_insns; } GCC,riscv,383, GCC,riscv,384,"static void riscv_emit_attribute(const char *name, int value) { fprintf(asm_out_file, ""\t.attribute %s, %d\n"", name, value); } void riscv_emit_attributes(void) { riscv_emit_attribute(""arch"", 1); riscv_emit_attribute(""unaligned_access"", 1); riscv_emit_attribute(""stack_align"", 8); } ``` In this implementation, the `riscv_emit_attribute` function takes two arguments: `name` and `value`. It uses the `fprintf` function to write the attribute to the assembly file, with the format `""\t.attribute %s, %d\n""`. The `riscv_emit_attributes` function is then called to emit the attributes for the RISC-V backend. Note that the `riscv_emit_attribute` function is declared as `static` to avoid conflicts with other functions that may have the same name. Also, the `riscv_emit_attributes` function is called in the `riscv_init` function, which is called at the beginning of the compilation process. 
```c void riscv_init(void) { riscv_emit_attributes(); }" GCC,riscv,385,"void riscv_emit_binary(rtx dest, rtx code, rtx x, rtx y) { emit_insn(gen_rtx_SET(dest, gen_rtx_CODE(code, x, y))); }" GCC,riscv,386,"riscv_emit_float_compare (rtx *operands, enum rtx_code code) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; switch (code) { case EQ: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_EQ (VOIDmode, op1, op2))); break; case NE: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_NE (VOIDmode, op1, op2))); break; case GT: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_GT (VOIDmode, op1, op2))); break; case GE: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_GE (VOIDmode, op1, op2))); break; case LT: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LT (VOIDmode, op1, op2))); break; case LE: emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LE (VOIDmode, op1, op2))); break; default: gcc_unreachable (); } }" GCC,riscv,387," bool *invert_ptr) { riscv_emit_cmp(cmp0, cmp1, code); riscv_emit_branch(target, code); if (invert_ptr) { riscv_emit_xor(target, target, 1); *invert_ptr = true; } }" GCC,riscv,388,"riscv_emit_move (rtx dest, rtx src) { if (!can_create_pseudo_p ()) { if (GET_CODE (dest) == MEM && GET_CODE (src) == MEM) { emit_insn (gen_rtx_SET (dest, src)); } else if (GET_CODE (dest) == REG && GET_CODE (src) == REG) { emit_insn (gen_rtx_SET (dest, src)); } else { if (GET_CODE (dest) == MEM) { emit_insn (gen_rtx_SET (dest, src)); } else { emit_insn (gen_rtx_SET (dest, src)); } } } else { emit_insn (gen_rtx_SET (dest, src)); } }" GCC,riscv,389,"static void riscv_emit_set(rtx target, rtx src) { riscv_emit_insn(target, src); }" GCC,riscv,390,riscv_epilogue_uses (rtx_insn *insn) { if (GET_CODE (insn) == CALL_INSN) return true; if (GET_CODE (insn) == JUMP_INSN) return true; if (GET_CODE (insn) == RETURN_INSN) return true; return false; } GCC,riscv,391, GCC,riscv,392, GCC,riscv,393,"int riscv_expand_conditional_move(rtx dest, rtx op, rtx cons, rtx alt) { if (op == 0) return 0; if (cons == 0) return 0; if (alt == 0) return 0; if (dest == 0) return 0; if (op->code == COND_MOVE) { if (cons->code == REG && alt->code == REG) { if (cons->reg == alt->reg) { emit_insn(gen_movsi(dest, cons)); return 1; } else { emit_insn(gen_movsi(dest, alt)); return 1; } } else { emit_insn(gen_movsi(dest, cons)); return 1; } } else { emit_insn(gen_movsi(dest, alt)); return 1; } }" GCC,riscv,394, GCC,riscv,395,"static inline void riscv_expand_int_scc(int op0, int op1, int target) { if (op0 == op1) { target = 1; } else { target = 0; } }" GCC,riscv,396,"static void riscv_expand_prologue (void) { tree func = current_function_decl; tree proto = TREE_TYPE (func); int riscv = 0; int zero = 1; int one = 0; int two = 1; int three = 1; int four = 0; int five = 1; int six = 1; expand_prologue (func, proto, riscv, zero, one, two, three, four, five, six); }" GCC,riscv,397,"static inline void riscv_extend_comparands(rtx op0, rtx op1) { if (GET_MODE(op0) == QImode) { op0 = gen_rtx_ZERO_EXTEND(HImode, op0); } else if (GET_MODE(op0) == HImode) { op0 = gen_rtx_SIGN_EXTEND(SImode, op0); } if (GET_MODE(op1) == QImode) { op1 = gen_rtx_ZERO_EXTEND(HImode, op1); } else if (GET_MODE(op1) == HImode) { op1 = gen_rtx_SIGN_EXTEND(SImode, op1); } }" GCC,riscv,398,int riscv_extend_cost(rtx op) { switch (GET_CODE(op)) { case CONST_INT: return 0; case REG: return 1; case MEM: return 2; default: return 3; } } GCC,riscv,399,"void riscv_file_start(void) { fprintf(asm_out_file, ""\t.option %spic\n"", riscv_pic_option); fprintf(asm_out_file, 
""\t.option norelax\n""); }" GCC,riscv,400,static int riscv_first_stack_step(void) { int size = 0; size += riscv_callee_saved_regs_size(); size += riscv_stack_frame_size(); return size; } GCC,riscv,401,"riscv_flatten_aggregate_argument (tree type, int regno) { if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE && TREE_CODE (type) != QUAL_UNION_TYPE) return false; if (TYPE_FIELDS (type) == NULL_TREE || TREE_CHAIN (TYPE_FIELDS (type)) == NULL_TREE || TREE_CHAIN (TREE_CHAIN (TYPE_FIELDS (type))) != NULL_TREE) return false; if (TARGET_FLOAT_ARGS && TYPE_MODE (type) == SFmode && regno < FIRST_FP_REGNUM + 2) return true; return false; }" GCC,riscv,402,"static void riscv_force_address(rtx x, enum machine_mode mode) { if (!riscv_valid_address_p(x, mode)) { rtx reg = gen_reg_rtx(mode); emit_move_insn(reg, x); x = reg; } }" GCC,riscv,403,"riscv_force_binary (riscv_reg_t code, riscv_reg_t x, riscv_reg_t y, riscv_mode_t mode) { riscv_reg_t result; result = code * x + y; result = riscv_reg_new (mode, result); return result; }" GCC,riscv,404,"riscv_force_temporary (rtx value, rtx dest) { if (riscv_new_pseudos_allowed_p ()) { rtx reg = gen_reg_rtx (GET_MODE (value)); emit_move_insn (reg, value); return reg; } else { emit_move_insn (dest, value); return dest; } }" GCC,riscv,405,"void riscv_for_each_saved_reg(void (*fn)(int reg, int sp_offset)) { int reg; int sp_offset; for (reg = 0; reg < RISCV_NUM_REGS; reg++) { if (riscv_reg_saved(reg)) { sp_offset = riscv_reg_sp_offset(reg); fn(reg, sp_offset); } } } ``` This function takes a pointer to a function `fn` as an argument, which is called for each register that is saved by the current function. The `riscv_reg_saved` function is used to check if a register is saved by the current function, and the `riscv_reg_sp_offset` function is used to get the offset of the stack pointer from the start of the frame for each saved register. You can use this function in your GCC plugin to iterate over each register that is saved by the current function and perform some action on each register. 
For example, you could use this function to print the name of each saved register and its offset from the start of the frame: ```c void print_saved_regs(int reg, int sp_offset) { printf(""Register %d (%s) is saved at offset %d from the start of the frame\n"", reg, riscv_reg_name(reg), sp_offset); } int main() { riscv_for_each_saved_reg(print_saved_regs); return 0; }" GCC,riscv,406,"riscv_frame_set (rtx reg, rtx mem) { if (REG != NULL && GET_CODE (REG) != REG) return NULL; if (MEM != NULL && GET_CODE (MEM) != MEM) return NULL; return gen_rtx_SET (VOIDmode, MEM, REG); }" GCC,riscv,407,static void riscv_function_arg(struct gcc_target_function_arg *arg) { switch (arg->mode) { case TARGET_FUNCTION_ARG_REGISTER: break; case TARGET_FUNCTION_ARG_STACK: break; default: gcc_unreachable(); } } GCC,riscv,408,riscv_function_arg_advance (struct gcc_target_function_arg_advance *advance) { advance->arg_reg = RISCV_ARG_REG; advance->arg_reg_size = RISCV_ARG_REG_SIZE; advance->arg_reg_align = RISCV_ARG_REG_ALIGN; advance->arg_stack_size = RISCV_ARG_STACK_SIZE; advance->arg_stack_align = RISCV_ARG_STACK_ALIGN; } GCC,riscv,409,static unsigned int riscv_function_arg_boundary(tree type) { unsigned int alignment = TYPE_ALIGN(type); if (alignment < PARM_BOUNDARY) return PARM_BOUNDARY; if (alignment > STACK_BOUNDARY) return STACK_BOUNDARY; return alignment; } GCC,riscv,410, GCC,riscv,411,"riscv_function_value (tree type, tree decl, tree value) { if (TREE_CODE (type) == FUNCTION_TYPE) { if (TREE_CODE (value) == CALL_EXPR) { tree call_expr = value; tree return_type = TREE_TYPE (call_expr); if (return_type == NULL_TREE) return_type = void_type_node; tree mode = TYPE_MODE (return_type); if (mode == VOIDmode) mode = TYPE_MODE (type); if (mode == VOIDmode) return; tree valtype = TREE_TYPE (mode); if (valtype == NULL_TREE) valtype = void_type_node; if (valtype != return_type) return; }" GCC,riscv,412,"riscv_gen_gpr_save_insn (rtx_insn *insn) { rtx_insn *pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 1), gen_rtx_REG (SImode, 2)))); riscv_set_target_specific_value (pat, 1, 2, 1, 0, 1, 1, 3, 0, 1); emit_insn (pat); }" GCC,riscv,413,"enum riscv_interrupt_type { RISCV_INTERRUPT_TYPE_UNKNOWN, RISCV_INTERRUPT_TYPE_USER, RISCV_INTERRUPT_TYPE_SUPERVISOR, }; static enum riscv_interrupt_type riscv_get_interrupt_type(tree decl) { if (TREE_CODE(decl) != FUNCTION_DECL) return RISCV_INTERRUPT_TYPE_UNKNOWN; tree attrs = DECL_ATTRIBUTES(decl); tree interrupt_attr = lookup_attribute(""interrupt"", attrs); if (!interrupt_attr) return RISCV_INTERRUPT_TYPE_UNKNOWN; tree user_attr = lookup_attribute(""user"", attrs); tree supervisor_attr = lookup_attribute(""supervisor"", attrs); if (user_attr && supervisor_attr) return RISCV_INTERRUPT_TYPE_UNKNOWN; if (user_attr) return RISCV_INTERRUPT_TYPE_USER; if (supervisor_attr) return RISCV_INTERRUPT_TYPE_SUPERVISOR; return RISCV_INTERRUPT_TYPE_UNKNOWN; }" GCC,riscv,414,"riscv_gpr_save_operation_p (rtx_insn *insn) { if (GET_CODE (insn) != SET || GET_CODE (XEXP (insn, 0)) != REG || GET_CODE (XEXP (insn, 1)) != MEM || GET_MODE (XEXP (insn, 0)) != SImode || GET_MODE (XEXP (insn, 1)) != SImode) return false; if (GET_CODE (XEXP (insn, 1)) == MEM && GET_CODE (XEXP (XEXP (insn, 1), 0)) == REG && REGNO (XEXP (XEXP (insn, 1), 0)) == 0) return true; return false; }" GCC,riscv,415," bool *no_add_attrs) { if (TREE_CODE (decl) != FUNCTION_DECL) { error (""%qE attribute only applies to functions"", name); *no_add_attrs = true; return; } if (is_attribute_p (""riscv"", 
name)) { if (args == NULL_TREE) { error (""%qE attribute requires an argument"", name); *no_add_attrs = true; return; } if (!is_valid_riscv_target (args)) { error (""%qE attribute argument is not a valid RISC-V target"", name); *no_add_attrs = true; return; } DECL_ATTRIBUTES (decl) = tree_cons (name, args, DECL_ATTRIBUTES (decl)); } else { ... } }" GCC,riscv,416,"bool riscv_hard_regno_mode_ok(unsigned int regno, machine_mode mode) { if (regno >= RISCV_NUM_REGS) return false; if (regno < RISCV_NUM_HARD_REGS) return true; if (regno >= RISCV_NUM_HARD_REGS && regno < RISCV_NUM_REGS) return false; return false; }" GCC,riscv,417,static int riscv_hard_regno_nregs(int regno) { switch (regno) { case 0: return 1; case 1: return 1; case 2: return 1; case 3: return 1; case 4: return 1; case 5: return 1; case 6: return 1; case 7: return 1; case 8: return 1; case 9: return 1; case 10: return 1; case 11: return 1; case 12: return 1; case 13: return 1; case 14: return 1; case 15: return 1; case 16: return 1; case 17: return 1; case 18: return 1; case 19: return 1; case 20: return 1; case 21: return 1; case 22: return 1; case 23: return 1; case 24: return 1; case 25: return 1; case 26: return 1; case 27: return 1; case 28: return 1; case 29: return 1; case 30: return 1; case 31: return 1; default: return 0; } } GCC,riscv,418, GCC,riscv,419,"riscv_immediate_operand_p (rtx code, rtx x) { if (GET_CODE (code) != CODE_LABEL) return false; if (GET_CODE (x) != CONST_INT) return false; if (INTVAL (x) < 0 || INTVAL (x) > 1023) return false; return true; }" GCC,riscv,420,"riscv_initial_elimination_offset (rtx from, rtx to) { if (from == frame_pointer_rtx || from == arg_pointer_rtx) { if (to == stack_pointer_rtx || to == hard_frame_pointer_rtx) { } } }" GCC,riscv,421,void riscv_init_machine_status(void) { riscv_machine_status_t *status = riscv_machine_status_alloc(); riscv_machine_status_init(status); riscv_machine_status_set_current(status); } GCC,riscv,422,int riscv_integer_cost(int val) { int cost = 0; if (val < 0) { cost = 1; } cost += (sizeof(int) * 8) - __builtin_clz(val); return cost; } GCC,riscv,423,bool riscv_interrupt_type_p(tree type) { if (type == NULL) return false; if (TREE_CODE(type) != FUNCTION_TYPE) return false; if (TREE_CODE(type) == FUNCTION_TYPE && TREE_CODE(TREE_TYPE(type)) == INTERRUPT_TYPE) return true; return false; } GCC,riscv,424,"bool riscv_int_order_operand_ok_p (enum riscv_code code, rtx cmp1) { switch (code) { case CODE_FOR_slt: case CODE_FOR_sltu: return cmp1 == const0_rtx; case CODE_FOR_sgt: case CODE_FOR_sgtu: return cmp1 == constm1_rtx; case CODE_FOR_sle: case CODE_FOR_sleu: return cmp1 == const1_rtx; case CODE_FOR_sge: case CODE_FOR_sgeu: return cmp1 == constm1_rtx; default: return false; } }" GCC,riscv,425,"riscv_in_small_data_p (tree exp) { if (TREE_CODE (exp) == VAR_DECL) { if (DECL_SECTION_NAME (exp) == NULL) return false; const char *section_name = TREE_STRING_POINTER (DECL_SECTION_NAME (exp)); if (strcmp (section_name, "".sdata"") == 0 || strcmp (section_name, "".sbss"") == 0) return true; } return false; }" GCC,riscv,426,static int riscv_issue_rate(void) { int issue_rate; issue_rate = riscv_target_specific_value(RISCV_TARGET_SPECIFIC_ISSUE_RATE); return issue_rate; } GCC,riscv,427,"riscv_legitimate_address_p (rtx addr, machine_mode mode, addr_space_t as) { if (REG_P (addr)) return true; if (CONSTANT_P (addr)) { if (riscv_legitimate_constant_p (addr, mode, as)) return true; } if (MEM_P (addr)) { if (riscv_legitimate_memory_reference_p (addr, mode, as)) return true; } return 
false; }" GCC,riscv,428,"riscv_legitimate_constant_p (rtx x) { if (GET_CODE (x) == CONST_INT) return true; if (GET_CODE (x) == CONST_DOUBLE) return true; if (GET_CODE (x) == CONST_VECTOR) return true; return false; } ``` This function checks if the given RTX `x` is a legitimate constant for the RISC-V backend of GCC. It returns `true` if `x` is a `CONST_INT`, `CONST_DOUBLE`, or `CONST_VECTOR`, and `false` otherwise. You can use this function in your GCC plugin by calling it with the appropriate RTX value. For example: ```c rtx x = ...; if (riscv_legitimate_constant_p (x)) { } else { }" GCC,riscv,429,"riscv_legitimize_address (rtx x, machine_mode mode, int strict_p) { if (legitimate_address_p (mode, x, strict_p)) return x; if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) { if (REG_P (x)) { rtx new_x = gen_rtx_REG (mode, REGNO (x)); if (legitimate_address_p (mode, new_x, strict_p)) return new_x; } else if (MEM_P (x)) { rtx new_x = gen_rtx_MEM (mode, XEXP (x, 0)); if (legitimate_address_p (mode, new_x, strict_p)) return new_x; } } return NULL_RTX; }" GCC,riscv,430,"riscv_legitimize_call_address (rtx op) { if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF) return; rtx reg = gen_reg_rtx (Pmode); emit_move_insn (reg, op); op = reg; }" GCC,riscv,431,"riscv_legitimize_const_move (rtx dest, rtx src) { if (CONSTANT_P (src) && !CONST_INT_P (src)) { rtx reg = gen_reg_rtx (GET_MODE (dest)); emit_move_insn (reg, src); src = reg; } emit_move_insn (dest, src); }" GCC,riscv,432,"riscv_legitimize_move (rtx dest, rtx src) { if (GET_CODE (dest) == REG && GET_CODE (src) == REG) { emit_insn (gen_movsi (dest, src)); return; } if (GET_CODE (dest) == MEM && GET_CODE (src) == MEM) { rtx temp = gen_reg_rtx (SImode); emit_insn (gen_movsi (temp, src)); emit_insn (gen_movsi (dest, temp)); return; } if (GET_CODE (dest) == REG && GET_CODE (src) == MEM) { emit_insn (gen_movsi (dest, src)); return; } if (GET_CODE (dest) == MEM && GET_CODE (src) == REG) { emit_insn (gen_movsi (dest, src)); return; } rtx temp = gen_reg_rtx (SImode); emit_insn (gen_movsi (temp, src)); emit_insn (gen_movsi (dest, temp)); }" GCC,riscv,433,"riscv_legitimize_tls_address (rtx loc, rtx symbol_ref, rtx offset) { rtx addr; if (GET_CODE (loc) == REG) { addr = loc; } else if (GET_CODE (loc) == MEM) { rtx reg = gen_reg_rtx (Pmode); emit_move_insn (reg, loc); addr = reg; } else { rtx reg = gen_reg_rtx (Pmode); emit_move_insn (reg, gen_rtx_PLUS (Pmode, loc, offset)); addr = reg; } addr = gen_rtx_PLUS (Pmode, addr, offset); return addr; }" GCC,riscv,434,"int riscv_load_store_insns(rtx_insn *insn, rtx_insn *mem) { int num_insns = 0; if (GET_CODE(insn) == MEM) { if (GET_CODE(mem) == MEM) { num_insns = 2; } else { num_insns = 1; } } else { num_insns = 0; } return num_insns; }" GCC,riscv,435,riscv_memmodel_needs_amo_acquire (memmodel model) { switch (model) { case MEMMODEL_RELAXED: case MEMMODEL_CONSUME: case MEMMODEL_ACQUIRE: case MEMMODEL_RELEASE: return false; case MEMMODEL_ACQ_REL: case MEMMODEL_SEQ_CST: return true; default: gcc_unreachable (); } } GCC,riscv,436,riscv_memmodel_needs_release_fence (enum memmodel model) { switch (model) { case MEMMODEL_RELAXED: case MEMMODEL_CONSUME: case MEMMODEL_ACQUIRE: return false; case MEMMODEL_RELEASE: case MEMMODEL_ACQ_REL: case MEMMODEL_SEQ_CST: return true; default: gcc_unreachable (); } } GCC,riscv,437," enum reg_class rclass, int outer_code) { int cost = 0; switch (outer_code) { case CODE_FOR_memory_move: cost = 1; break; case CODE_FOR_memory_move_double: cost = 2; break; case CODE_FOR_memory_move_quad: 
cost = 4; break; default: break; } if (mode == QImode) { cost *= 2; } else if (mode == HImode) { cost *= 4; } else if (mode == SImode) { cost *= 8; } else if (mode == DImode) { cost *= 16; } if (rclass == GENERAL_REGS) { cost *= 2; } else if (rclass == FP_REGS) { cost *= 4; } return cost; }" GCC,riscv,438,"riscv_merge_decl_attributes (tree decl, tree attributes) { if (TREE_CODE (decl) == FUNCTION_DECL && lookup_attribute (""interrupt"", DECL_ATTRIBUTES (decl))) { error (""%qs function cannot have different interrupt type"", IDENTIFIER_POINTER (DECL_NAME (decl))); return; } decl->common.attributes = attributes; }" GCC,riscv,439,int riscv_min_arithmetic_precision(void) { return 32; } GCC,riscv,440,riscv_modes_tieable_p (machine_mode mode) { if (mode == SFmode || mode == DFmode) return false; return true; } GCC,riscv,441, GCC,riscv,442,bool riscv_naked_function_p(tree func) { return (func->common.target_specific_value & TARGET_SPECIFIC_VALUE_RISCV_NAKED) != 0; } GCC,riscv,443,"riscv_new_address_profitable_p (rtx_insn *insn) { if (GET_CODE (insn) != MEM) return false; rtx addr = XEXP (insn, 0); if (GET_CODE (addr) != PLUS || GET_CODE (XEXP (addr, 0)) != REG || GET_CODE (XEXP (addr, 1)) != CONST_INT) return false; HOST_WIDE_INT offset = INTVAL (XEXP (addr, 1)); if (offset % 4 != 0) return false; rtx reg = XEXP (addr, 0); if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER) return false; if (offset < 0 || offset >= 256) return false; return true; }" GCC,riscv,444,"static void riscv_option_override (void) { if (TARGET_RISCV) { if (riscv_abi != RISCV_ABI_INVALID) { if (riscv_preferred_stack_boundary != 0) { if (riscv_preferred_stack_boundary & (riscv_preferred_stack_boundary - 1)) error (""%<-mpreferred-stack-boundary=%d%> must be between %d and %d"", riscv_preferred_stack_boundary, RISCV_MIN_STACK_BOUNDARY, RISCV_MAX_STACK_BOUNDARY); }" GCC,riscv,445,"static void riscv_output_gpr_save(rtx_insn *insn) { rtx_insn *call_insn; rtx_insn *save_insn; call_insn = gen_rtx_CALL(VOIDmode, gen_rtx_SYMBOL_REF(VOIDmode, ""__riscv_save_gpr""), VOIDmode, NULL_RTX, NULL_RTX); save_insn = gen_rtx_SAVE(VOIDmode, NULL_RTX, NULL_RTX); call_insn = gen_rtx_SEQUENCE(VOIDmode, call_insn, save_insn); return call_insn; }" GCC,riscv,446, GCC,riscv,447, GCC,riscv,448, GCC,riscv,449, GCC,riscv,450,riscv_pass_aggregate_in_fpr_and_gpr_p (tree type) { if (TREE_CODE (type) != RECORD_TYPE || TYPE_BIT_FIELD_BITS (type) != 0) return false; for (tree field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE && TYPE_MODE (TREE_TYPE (field)) == TYPE_MODE (type)) return true; } for (tree field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL && TREE_CODE (TREE_TYPE (field)) == INTEGER_TYPE && TYPE_MODE (TREE_TYPE (field)) == TYPE_MODE (type)) return true; } return false; } GCC,riscv,451, GCC,riscv,452," const_tree type, bool named_arg) { if (AGGREGATE_TYPE_P (type) || (GET_MODE_SIZE (mode) > 8 && GET_MODE_CLASS (mode) == MODE_INT)) return true; return false; }" GCC,riscv,453," enum machine_mode mode2, int offset2) { if (regno == regno1 && regno == regno2) return; if (regno == regno2 && regno == regno1) return; if (regno != regno1 && regno != regno2) { riscv_pass_fpr(regno1, mode1, offset1); riscv_pass_fpr(regno2, mode2, offset2); } }" GCC,riscv,454,"riscv_pass_fpr_single (tree type, tree value, int regno) { if (TYPE_MODE (type) == SFmode && TYPE_MODE (value) == BLKmode) { tree field = TYPE_FIELDS 
(type); tree field_type = TREE_TYPE (field); tree field_value = build_simple_mem_ref (field); riscv_pass_by_reference (field_type, field_value, regno); } else { riscv_pass_by_value (type, value, regno); } }" GCC,riscv,455,int riscv_pass_mode_in_fpr_p (machine_mode mode) { if (mode == SFmode || mode == DFmode || mode == TFmode) return 1; else if (mode == SCmode || mode == DCmode || mode == TCmode) return 2; else return 0; } GCC,riscv,456,"riscv_print_operand (FILE *stream, rtx op, int letter) { switch (letter) { case 'h': if (GET_CODE (op) == HIGH) { rtx reloc = XEXP (op, 0); if (GET_CODE (reloc) == CONST_INT) { fprintf (stream, ""high(%d)"", INTVAL (reloc)); return; } } break; case 'R': if (GET_CODE (op) == LOW) { rtx reloc = XEXP (op, 0); if (GET_CODE (reloc) == CONST_INT) { fprintf (stream, ""low(%d)"", INTVAL (reloc)); return; } } break; case 'C': if (GET_CODE (op) == COMPARE) { rtx cond = XEXP (op, 0); if (GET_CODE (cond) == REG) { fprintf (stream, ""cmp %s, %s"", riscv_print_operand (stream, XEXP (cond, 0), 0), riscv_print_operand (stream, XEXP (cond, 1), 0)); return; } } break; case 'A': if (GET_CODE (op) == MEM) { rtx mem = XEXP (op, 0); if (GET_CODE (mem) == REG) { fprintf (stream, ""atomic %s"", riscv_print_operand (stream, mem, 0)); return; } }" GCC,riscv,457,"void riscv_print_operand_address(const char *operand) { if (operand == NULL) return; if (strcmp(operand, ""sp"") == 0) printf(""(sp)""); else if (strcmp(operand, ""fp"") == 0) printf(""(fp)""); else if (strcmp(operand, ""ra"") == 0) printf(""(ra)""); else printf(""(%s)"", operand); }" GCC,riscv,458," int hi_reloc) { if (hi_reloc) { if (strcmp (op, ""%hi"") == 0) fprintf (stderr, ""%s"", ""%hi""); else if (strcmp (op, ""%lo"") == 0) fprintf (stderr, ""%s"", ""%lo""); else if (strcmp (op, ""%pcrel_hi"") == 0) fprintf (stderr, ""%s"", ""%pcrel_hi""); else if (strcmp (op, ""%pcrel_lo"") == 0) fprintf (stderr, ""%s"", ""%pcrel_lo""); else if (strcmp (op, ""%tprel_hi"") == 0) fprintf (stderr, ""%s"", ""%tprel_hi""); else if (strcmp (op, ""%tprel_lo"") == 0) fprintf (stderr, ""%s"", ""%tprel_lo""); else fprintf (stderr, ""invalid use of '%%%c'"", op[1]); } else { if (strcmp (op, ""%s("") == 0) fprintf (stderr, ""%s"", ""%s(""); else fprintf (stderr, ""invalid use of '%%%c'"", op[1]); } }" GCC,riscv,459,"enum riscv_promote_function_mode { RISCV_PROMOTE_FUNCTION_MODE_ALWAYS_PROMOTE, RISCV_PROMOTE_FUNCTION_MODE_NEVER_PROMOTE, RISCV_PROMOTE_FUNCTION_MODE_PROMOTE_IF_NEEDED }; static enum riscv_promote_function_mode riscv_promote_function_mode (tree type) { if (type == NULL_TREE) return RISCV_PROMOTE_FUNCTION_MODE_ALWAYS_PROMOTE; if (TREE_CODE (type) == INTEGER_TYPE) { if (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) return RISCV_PROMOTE_FUNCTION_MODE_PROMOTE_IF_NEEDED; else return RISCV_PROMOTE_FUNCTION_MODE_NEVER_PROMOTE; } return RISCV_PROMOTE_FUNCTION_MODE_ALWAYS_PROMOTE; }" GCC,riscv,460,"static int riscv_register_move_cost(int class, int mode, int reg1, int reg2) { int cost = 0; if (class == reg1 && class == reg2) { cost = 0; } else { cost = 1; } return cost; }" GCC,riscv,461, GCC,riscv,462," bool strict_p) { if (regno >= FIRST_PSEUDO_REGISTER) return false; if (strict_p) { if (!TEST_HARD_REG_BIT (reg_class_contents[BASE_REGS], regno)) return false; } else { if (regno >= FIRST_FIXED_REGISTER || regno >= FIRST_PSEUDO_REGISTER) return false; } if (GET_MODE_CLASS (mode) == MODE_INT && regno >= FIRST_PSEUDO_REGISTER) { if (!TEST_HARD_REG_BIT (reg_class_contents[BASE_REGS], regno)) return false; }" 
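The operand-printing records ending just above emit %hi/%lo (and the pcrel/tprel variants) around symbolic operands. What those relocation operators compute can be shown with a small self-contained sketch — hedged as an illustration of the standard RISC-V hi/lo split (hypothetical helper names, not the backend's own code): the low part is the sign-extended low 12 bits, so the high part must be rounded for a lui+addi pair to reconstruct the constant. ```c #include <assert.h>
#include <stdint.h>

/* Hypothetical helpers illustrating the %hi/%lo split. */
static int32_t hi20(int32_t x)
{
  return (x + 0x800) >> 12;            /* round so the signed low part fits  */
}

static int32_t lo12(int32_t x)
{
  return x - (hi20(x) << 12);          /* always in [-2048, 2047]            */
}

int main(void)
{
  int32_t x = 0x12345FFF;
  /* lui puts hi20 << 12 in the register; addi then adds the signed lo12. */
  assert((hi20(x) << 12) + lo12(x) == x);
  return 0;
}
```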
GCC,riscv,463,riscv_reorg (void) { if (TARGET_RISCV) { } } GCC,riscv,464,"void riscv_restore_reg(uint32_t reg, uint32_t mem) { asm volatile(""lw %0, %1"" : ""=r""(reg) : ""m""(mem)); }" GCC,riscv,465,"riscv_return_addr (rtx_insn *insn, rtx_insn *next, rtx_insn *prev) { rtx_insn *return_addr_insn; rtx_insn *return_addr_next; rtx_insn *return_addr_prev; return_addr_insn = get_return_address_insn (insn); if (!return_addr_insn) return NULL_RTX; return_addr_next = next_insn (return_addr_insn); return_addr_prev = prev_insn (return_addr_insn); if (!return_addr_next || !return_addr_prev) return NULL_RTX; if (!return_addr_next->is_return_address_insn () || !return_addr_prev->is_return_address_insn ()) return NULL_RTX; return return_addr_insn; }" GCC,riscv,466,"riscv_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED) { return true; }" GCC,riscv,467, GCC,riscv,468,"void riscv_save_reg(int reg, int mem) { asm volatile(""sw %0, %1"" : ""=r"" (reg) : ""=m"" (mem)); }" GCC,riscv,469, GCC,riscv,470,"void riscv_save_restore_reg(int regno, int mode, int offset) { if (mode == RISCV_MODE_SAVE) { riscv_push(regno); } else if (mode == RISCV_MODE_RESTORE) { riscv_pop(regno); } riscv_addi(RISCV_REG_SP, RISCV_REG_SP, offset); }" GCC,riscv,471,"riscv_secondary_memory_needed (rtx op, enum machine_mode mode) { if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE_SIZE (mode) > GET_MODE_SIZE (SImode)) return true; return false; }" GCC,riscv,472,"riscv_select_section (tree decl, int reloc) { if (TREE_CODE (decl) == VAR_DECL) { if (DECL_INITIAL (decl) == NULL_TREE || !TREE_READONLY (decl) || TREE_THIS_VOLATILE (decl)) return; if (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE) { if (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE) { if (TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == RECORD_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == UNION_TYPE) return; } else { if (TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == RECORD_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == UNION_TYPE) return; } } if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) { if (TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == RECORD_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == UNION_TYPE) return; } if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) { if (TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == RECORD_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == UNION_TYPE) return; }" GCC,riscv,473," tree va_list_type) { if (TARGET_RISCV) { tree arg_types = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); tree new_arg_types = NULL_TREE; for (int i = 0; i < TREE_VEC_LENGTH (arg_types); i++) { tree arg_type = TREE_VEC_ELT (arg_types, i); tree new_arg_type = NULL_TREE; if (TREE_CODE (arg_type) == REFERENCE_TYPE) { new_arg_type = build_reference_type (TREE_TYPE (arg_type)); } else if (TREE_CODE (arg_type) == POINTER_TYPE) { new_arg_type = build_pointer_type (TREE_TYPE (arg_type)); } else { new_arg_type = arg_type; } new_arg_types = tree_cons (NULL_TREE, new_arg_type, new_arg_types); }" GCC,riscv,474,"riscv_set_current_function (tree decl, bool is_naked) { if (is_naked && TREE_CODE (decl) == FUNCTION_DECL) { if (TREE_CODE (TREE_TYPE (decl)) == INTERRUPT_TYPE) error (""% function cannot return a value""); else if (TREE_CODE (TREE_TYPE (decl)) == NAKED_TYPE) error (""% function cannot have arguments""); } current_function_decl = decl; riscv_target_specific_value = 1; }" GCC,riscv,475,"void riscv_set_frame_expr(riscv_insn *insn, const char *frame_pattern) { insn->is_frame_related = 1; insn->frame_pattern = frame_pattern; }" 
GCC,riscv,476,"void riscv_set_return_address(word_mode_gpr ADDRESS, word_mode_gpr SCRATCH) { asm volatile(""mv %0, %1\n\t"" ""mv %2, %3\n\t"" ""mv %4, %5\n\t"" : ""=r"" (ADDRESS), ""=r"" (SCRATCH) : ""r"" (ADDRESS), ""r"" (SCRATCH) : ""memory""); }" GCC,riscv,477,"riscv_shorten_lw_offset (rtx insn, rtx *operands) { rtx base, offset; base = operands[0]; offset = operands[1]; if (GET_CODE (offset) == CONST_INT && INTVAL (offset) >= 0 && INTVAL (offset) < 64) { base = plus_constant (base, INTVAL (offset)); offset = const0_rtx; } operands[0] = base; operands[1] = offset; }" GCC,riscv,478, unsigned int align) { } GCC,riscv,479, GCC,riscv,480,"int riscv_split_const_insns(rtx x) { int num_insns = 0; if (GET_MODE(x) == DImode) { rtx low = gen_lowpart(SImode, x); rtx high = gen_highpart(SImode, x); emit_insn(gen_movsi(low, x)); emit_insn(gen_movsi(high, x)); num_insns += 2; } return num_insns; }" GCC,riscv,481,"riscv_split_doubleword_move (rtx dest, rtx src, rtx scratch) { if (TARGET_64BIT) { if (GET_CODE (dest) == REG && REGNO (dest) < FIRST_PSEUDO_REGISTER) { rtx dest_lo = gen_rtx_REG (DImode, REGNO (dest)); rtx dest_hi = gen_rtx_REG (DImode, REGNO (dest) + 1); rtx src_lo = gen_rtx_REG (DImode, REGNO (src)); rtx src_hi = gen_rtx_REG (DImode, REGNO (src) + 1); emit_insn (gen_movdi (dest_lo, src_lo)); emit_insn (gen_movdi (dest_hi, src_hi)); } else { rtx dest_lo = gen_rtx_MEM (DImode, dest); rtx dest_hi = gen_rtx_MEM (DImode, plus_constant (dest, 8)); rtx src_lo = gen_rtx_MEM (DImode, src); rtx src_hi = gen_rtx_MEM (DImode, plus_constant (src, 8)); emit_insn (gen_movdi (dest_lo, src_lo)); emit_insn (gen_movdi (dest_hi, src_hi)); }" GCC,riscv,482,"void riscv_split_integer(uint64_t x, uint32_t *lo, uint32_t *hi) { *lo = (uint32_t)x; *hi = (uint32_t)(x >> 32); }" GCC,riscv,483,"int riscv_split_integer_cost(rtx val, int scratch_reg) { int cost = 0; if (GET_CODE(val) == CONST_INT) { cost = 1; } else if (GET_CODE(val) == REG) { cost = 0; } else if (GET_CODE(val) == MEM) { cost = 1; } else if (GET_CODE(val) == PLUS) { cost = 2; } else if (GET_CODE(val) == MINUS) { cost = 2; } else if (GET_CODE(val) == MULT) { cost = 2; } else if (GET_CODE(val) == DIV) { cost = 2; }" GCC,riscv,484,"riscv_split_symbol (rtx addr, machine_mode mode, rtx *low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if (GET_CODE (addr) == SYMBOL_REF) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out) *low_out = low; return true; } else { return false; } } else { if (GET_CODE (addr) == MEM) { rtx high = gen_rtx_HIGH (mode, addr); rtx low = gen_rtx_LOW (mode, addr); if (low_out) *low_out = low; return true; } else { return false; } } }" GCC,riscv,485,"riscv_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn) { if (!single_set (out_insn) || !single_set (in_insn)) return false; if (!MEM_P (SET_DEST (in_insn))) return false; rtx_insn *out_set = single_set (out_insn); rtx_insn *in_set = single_set (in_insn); rtx out_dest = SET_DEST (out_set); rtx in_dest = SET_DEST (in_set); if (REG_P (out_dest) && REG_P (in_dest) && REGNO (out_dest) == REGNO (in_dest)) return false; if (GET_CODE (in_insn) == PARALLEL) { for (int i = 0; i < XVECLEN (in_insn, 0); i++) { rtx_insn *x = XVECEXP (in_insn, 0, i); if (GET_CODE (x) != SET && GET_CODE (x) != CLOBBER && GET_CODE (x) != USE) return false; } }" GCC,riscv,486,"riscv_strip_unspec_address (rtx op) { if (GET_CODE (op) == UNSPEC && XINT (op, 1) == UNSPEC_ADDRESS) return XEXP (op, 0); return op; }" GCC,riscv,487,"uint32_t riscv_subword(uint64_t OP, bool HIGH_P) { 
if (HIGH_P) { return (uint32_t)(OP >> 32); } else { return (uint32_t)OP; } }" GCC,riscv,488,"riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type) { if (GET_CODE (x) == SYMBOL_REF) { *symbol_type = RISCV_SYMBOL_TYPE_SYMBOLIC; return true; } else if (GET_CODE (x) == LABEL_REF) { *symbol_type = RISCV_SYMBOL_TYPE_LABEL; return true; } else { *symbol_type = RISCV_SYMBOL_TYPE_UNKNOWN; return false; } }" GCC,riscv,489, GCC,riscv,490,"riscv_tls_symbol_p (rtx x) { if (GET_CODE (x) != SYMBOL_REF) return false; const char *name = XSTR (x, 0); if (strncmp (name, ""riscv,0"", 7) != 0) return false; return true; }" GCC,riscv,491,"riscv_unique_section (tree decl) { const char *section_name; if (TREE_CODE (decl) == FUNCTION_DECL) { section_name = "".text""; } else if (TREE_CODE (decl) == VAR_DECL) { section_name = "".data""; } else if (TREE_CODE (decl) == CONST_DECL) { section_name = "".rodata""; } else { section_name = "".bss""; } if (TREE_CODE (decl) == FUNCTION_DECL) { if (TREE_PUBLIC (decl)) section_name = "".gnu.linkonce.t.""; else section_name = "".gnu.linkonce.r.""; } else if (TREE_CODE (decl) == VAR_DECL) { if (TREE_PUBLIC (decl)) section_name = "".gnu.linkonce.d.""; else section_name = "".gnu.linkonce.b.""; } else if (TREE_CODE (decl) == CONST_DECL) { if (TREE_PUBLIC (decl)) section_name = "".gnu.linkonce.c.""; else section_name = "".gnu.linkonce.s.""; } else { section_name = "".gnu.linkonce.b.""; } section_name = riscv_section_name (section_name); return section_name; }" GCC,riscv,492,"riscv_unspec_address (void *address, enum riscv_symbol_type symbol_type) { return (void *)(address | (symbol_type << 16)); }" GCC,riscv,493,"riscv_unspec_address_offset (rtx base, rtx offset, rtx symbol_type) { rtx result; result = gen_rtx_UNSPEC (VOIDmode, base, symbol_type); result = gen_rtx_PLUS (VOIDmode, result, offset); return result; }" GCC,riscv,494,"riscv_unspec_offset_high (rtx base, rtx symbol_type, rtx temp) { if (GET_CODE (symbol_type) == CONST_INT && INTVAL (symbol_type) == 32) { rtx high_part = gen_rtx_PLUS (Pmode, base, gen_rtx_CONST_INT (Pmode, INTVAL (symbol_type))); return high_part; } else return base; }" GCC,riscv,495,"riscv_use_save_libcall (rtx_insn *insn) { if (GET_CODE (insn) == CALL_INSN && GET_CODE (XEXP (XEXP (insn, 0), 0)) == SYMBOL_REF && XSTR (XEXP (XEXP (insn, 0), 0), 0) == ""__riscv_save_gprs"" && GET_CODE (XEXP (XEXP (insn, 0), 1)) == SYMBOL_REF && XSTR (XEXP (XEXP (insn, 0), 1), 0) == ""__riscv_restore_gprs"") return true; return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the RISC-V assembly code. It checks if the instruction is a `CALL_INSN` and if the first operand is a `SYMBOL_REF` with the name `__riscv_save_gprs` and the second operand is also a `SYMBOL_REF` with the name `__riscv_restore_gprs`. If both conditions are true, it returns `true`, otherwise it returns `false`. You can then use this function in your RISC-V backend code to determine whether to call the GPR save/restore routines. For example: ``` if (riscv_use_save_libcall (insn)) { ... 
}" GCC,riscv,496,"riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p) { if (GET_CODE (x) != REG) return false; if (strict_p) { if (!REG_OK_STRICT (x, mode)) return false; } else { if (!REG_OK (x, mode)) return false; }" GCC,riscv,497, target_specific_value target_specific_value) { if (mode == LO_SUM) { if (sym_type == SYM_TYPE_RISCV) { if (target_specific_value == 0 || target_specific_value == 1 || target_specific_value == 2) { return true; } } } return false; } GCC,riscv,498,"bool riscv_valid_offset_p(rtx x, machine_mode mode) { if (GET_CODE(x) == CONST_INT) { HOST_WIDE_INT offset = INTVAL(x); if (offset >= 0 && offset < 16) return true; } if (GET_CODE(x) == REG) { int regno = REGNO(x); if (regno >= 0 && regno < 32) return true; } return false; }" GCC,riscv,499,"void riscv_va_start(void *ap, void *fp) { va_list va; va_start(va, fp); void *arg_addr = va_arg(va, void *); void *next_arg_addr = va_arg(va, void *); void *last_arg_addr = va_arg(va, void *); va_list_set_first_arg(va, arg_addr); va_list_set_next_arg(va, next_arg_addr); va_list_set_last_arg(va, last_arg_addr); return va; }" GCC,riscv,500,"void riscv_warn_func_return(tree decl) { if (TREE_CODE(decl) == FUNCTION_DECL && TREE_CODE(TREE_TYPE(decl)) == VOID_TYPE) { warning(""function %qD has a return statement"", decl); } } ``` This function takes a `tree` argument representing the function declaration, and checks if the function has a return statement. If it does, it warns the user using the `warning` function. You can then use this function in your RISC-V backend implementation by calling it in the appropriate place, such as in the `finish_function` function: ``` void finish_function(tree decl) { riscv_warn_func_return(decl); }" GCC,riscv,501,"static inline riscv_reg_t riscv_zero_if_equal(riscv_reg_t cmp0, riscv_reg_t cmp1) { riscv_reg_t result; asm volatile(""cmp %0, %1\n\t"" ""beqz %2, %3\n\t"" : ""=r"" (result) : ""r"" (cmp0), ""r"" (cmp1), ""r"" (0) : ""cc""); return result; }" GCC,riscv,502,"bool extract_base_offset_in_addr(rtx mem, rtx *base, rtx *offset) { if (GET_CODE(mem) == MEM && GET_CODE(XEXP(mem, 0)) == PLUS) { *base = XEXP(XEXP(mem, 0), 0); *offset = XEXP(XEXP(mem, 0), 1); return true; } else { *base = NULL_RTX; *offset = NULL_RTX; return false; } }" GCC,riscv,503,"void split_plus(rtx x, rtx *base_ptr, rtx *offset_ptr) { if (GET_CODE(x) == PLUS && GET_CODE(XEXP(x, 0)) == CONST_INT) { *base_ptr = XEXP(x, 0); *offset_ptr = XEXP(x, 1); } else { *base_ptr = x; *offset_ptr = const0_rtx; } }" GCC,riscv,504," machine_mode mode, bool *reserved) { if (mem1->addr == mem2->addr + GET_MODE_SIZE (mode)) { *reserved = (mem1->addr < mem2->addr); return true; } return false; }" GCC,riscv,505, GCC,riscv,506, GCC,riscv,507,"th_mempair_order_operands (rtx_insn *insn, rtx_insn *next_insn) { rtx_insn *operands[2]; int i; if (!(next_insn->code == CODE_FOR_load_address || next_insn->code == CODE_FOR_store_address)) return false; for (i = 0; i < 2; i++) { operands[i] = next_insn->operands[i]; if (operands[i]->code != CODE_FOR_load_address && operands[i]->code != CODE_FOR_store_address) return false; } if (operands[0]->code == CODE_FOR_load_address && operands[1]->code == CODE_FOR_store_address) { rtx_insn *temp = operands[0]; operands[0] = operands[1]; operands[1] = temp; } for (i = 0; i < 2; i++) next_insn->operands[i] = operands[i]; return true; }" GCC,riscv,508," enum rtx_code code) { gcc_assert (operands[0] != NULL); gcc_assert (operands[1] != NULL); gcc_assert (operands[2] != NULL); gcc_assert (operands[3] != NULL); 
if (load_p) { if (mode == DI) { if (code == UNKNOWN) fprintf (asm_out_file, ""th.lwud\t%0, %1, (%2), %3, %4"", operands[0], operands[1], operands[2], operands[3], operands[4]); else if (code == SIGN_EXTEND) fprintf (asm_out_file, ""th.lwd\t%0, %1, (%2), %3, %4"", operands[0], operands[1], operands[2], operands[3], operands[4]); else if (code == ZERO_EXTEND) fprintf (asm_out_file, ""th.ldd\t%0, %1, (%2), %3, %4"", operands[0], operands[1], operands[2], operands[3], operands[4]); }" GCC,riscv,509,"th_mempair_prepare_save_restore_operands (rtx_insn *insn, rtx *operands) { int i; int num_operands = XVECLEN (PATTERN (insn), 0); for (i = 0; i < num_operands; i++) operands[i] = NULL_RTX; for (i = 0; i < num_operands; i++) { rtx op = XVECEXP (PATTERN (insn), 0, i); if (op == NULL_RTX) continue; switch (GET_CODE (op)) { case REG: operands[i] = op; break; case MEM: operands[i] = XEXP (op, 0); break; case CONST_INT: operands[i] = GEN_INT (INTVAL (op)); break; default: gcc_unreachable (); } } }" GCC,riscv,510,"th_mempair_restore_regs (rtx dest1, rtx dest2, rtx src1, rtx src2) { emit_insn (gen_rtx_SET (dest1, src1)); emit_insn (gen_rtx_SET (dest2, src2)); emit_insn (gen_rtx_SET (gen_rtx_REG (SImode, 4), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 4), gen_rtx_CONST_INT (SImode, 2)))); }" GCC,riscv,511,"th_mempair_save_regs (rtx_insn *insn, rtx op0, rtx op1, rtx op2, rtx op3) { rtx_insn *mem_insn = emit_insn (gen_rtx_MEM (SImode, gen_rtx_REG (SImode, op0))); rtx_insn *mem_insn2 = emit_insn (gen_rtx_MEM (SImode, gen_rtx_REG (SImode, op1))); RTX_FRAME_RELATED_P (mem_insn) = 1; RTX_FRAME_RELATED_P (mem_insn2) = 1; INSN_CODE (mem_insn) = CODE_FOR_th_mempair_save_regs; INSN_CODE (mem_insn2) = CODE_FOR_th_mempair_save_regs; INSN_OPERANDS (mem_insn) = op0; INSN_OPERANDS (mem_insn2) = op1; emit_insn (mem_insn); emit_insn (mem_insn2); }" GCC,riscv,512, GCC,arc,0,"arc_cpu_cpp_builtins (void) { builtin_define (""__arc__""); builtin_define (""__ARC_TLS_REGNO__""); builtin_define (""__BIG_ENDIAN__""); builtin_define (""__LITTLE_ENDIAN__""); builtin_define (""__big_endian__""); }" GCC,arc,1,"void arc_cpu_cpp_builtins(void) { const char *arc = ""arc""; const char *cpu = ""cpu=arc""; const char *machine = ""machine=arc""; const char *__arc__ = ""__arc__""; const char *__ARC_TLS_REGNO__ = ""__ARC_TLS_REGNO__""; const char *__ARC_LPC_WIDTH__ = ""__ARC_LPC_WIDTH__""; const char *__BIG_ENDIAN__ = ""__BIG_ENDIAN__""; const char *__LITTLE_ENDIAN__ = ""__LITTLE_ENDIAN__""; const char *__big_endian__ = ""__big_endian__""; printf(""Target specific values: %s, %s, %s, %s, %s, %s, %s, %s, %s\n"", arc, cpu, machine, __arc__, __ARC_TLS_REGNO__, __ARC_LPC_WIDTH__, __BIG_ENDIAN__, __LITTLE_ENDIAN__, __big_endian__); }" GCC,arc,2, GCC,arc,3,"void def_or_undef_macro(const char *macro, int value) { if (value) { define_macro(macro, value); } else { undef_macro(macro); } }" GCC,arc,4,int arc_address_cost(int ADDR) { int cost = 0; if (ADDR >= 0 && ADDR <= 15) { cost = arc_target_specific_value[ADDR]; } return cost; } GCC,arc,5,"arc_check_release31a (rtx_insn *insn) { if (TARGET_ARC && TARGET_RELEASE_310a) { rtx_insn *nop_insn = gen_nop (); emit_insn_before (nop_insn, insn); } } ``` This function checks if the target is ARC and the release is 310a, and if so, it generates a NOP instruction and inserts it before the current instruction. You can then use this function in the GCC backend to insert NOP instructions before conditional jumps when compiling for release 310a. 
Here is an example of how you could use this function in the GCC backend: ``` if (TARGET_ARC && TARGET_RELEASE_310a) { arc_check_release31a (insn); }" GCC,arc,6,arc_check_short_reg_p (rtx reg) { if (REGNO (reg) < 16) return true; if (REGNO (reg) >= 32 && REGNO (reg) < 48) return true; if (REGNO (reg) >= 64 && REGNO (reg) < 80) return true; return false; } GCC,arc,7,"int arc_output_addsi (rtx *operands, int cond_p, int output_p) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; if (cond_p) { emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_PLUS (SImode, op1, op2))); return 1; } else { emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_PLUS (SImode, op1, op2))); return 0; } }" GCC,arc,8, GCC,arc,9,arc_sched_issue_rate (void) { if (arc_target_info->issue_rate) { return arc_target_info->issue_rate; } else { return 1; } } GCC,arc,10, GCC,arc,11,"void split_addsi(int a, int b, int c, int d) { int e = a + b; int f = c + d; int g = e + f; return g; }" GCC,arc,12,"split_subsi (rtx *operands, int *len) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; rtx op3 = operands[3]; rtx op4 = operands[4]; if (GET_CODE (op1) == CONST_INT && INTVAL (op1) >= 0 && INTVAL (op1) <= 255) { emit_insn (gen_subsi3 (op0, op1, op2)); *len = 1; } else { rtx reg = gen_reg_rtx (SImode); emit_insn (gen_movsi (reg, op1)); emit_insn (gen_subsi3 (op0, reg, op2)); *len = 2; } }" GCC,arc,13,"static void workaround_arc_anomaly(void) { if (arc_hazard_p()) { emit_insn(gen_nop()); } } ``` This function checks if the `arc_hazard` is present, and if it is, it inserts a `NOP` instruction to ensure that the branch is not shortened. The `arc_hazard` is a target-specific value that indicates the presence of a hazard in the ARC architecture. You can call this function in the machine reorg phase of the GCC compiler by adding the following code to your machine reorg pass: ``` void machine_reorg(void) { workaround_arc_anomaly(); }" GCC,arc,14,"arc_ccfsm_branch_deleted_p (rtx insn) { if (GET_CODE (insn) != JUMP_INSN) return false; rtx cond = XEXP (insn, 1); if (GET_CODE (cond) != EQ_EXPR) return false; rtx op0 = XEXP (cond, 0); rtx op1 = XEXP (cond, 1); if (GET_CODE (op0) != REG || GET_CODE (op1) != REG) return false; if (arc_target_specific_value (insn, 1, 2, 1, 0)) return true; return false; }" GCC,arc,15,arc_ccfsm_record_branch_deleted (rtx_insn *insn) { if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE) { rtx_insn *next_insn = NEXT_INSN (insn); while (next_insn && GET_CODE (next_insn) == INSN && GET_CODE (PATTERN (next_insn)) == SET && GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE) { next_insn = NEXT_INSN (next_insn); } if (next_insn && GET_CODE (next_insn) == JUMP_INSN) { arc_ccfsm_record_branch_deleted (next_insn); } } } GCC,arc,16,arc_delay_slots_for_epilogue (void) { if (arc_interrupt_handler_p ()) return 0; if (arc_frame_size () == 0) return 1; return 0; } GCC,arc,17,"void arc_finalize_pic (rtx insn) { if (GET_CODE (insn) == CALL_INSN) { rtx link = XEXP (insn, 1); while (link) { if (GET_CODE (link) == GOTOFF) { rtx basereg = gen_rtx_REG (SImode, GOTBASE); rtx pc = gen_rtx_REG (SImode, PC); rtx offset = gen_rtx_CONST (SImode, unspec_rtx (symref_rtx (XSTR (link, 0)), 3)); rtx add = gen_rtx_ADD (SImode, basereg, pc, offset); emit_insn (add); break; } link = XEXP (link, 1); } } }" GCC,arc,18,"void arc_init(void) { arc_target_specific_values = { .mcpu = ""base"", .base = ""base"", .bad_value = ""bad value (%s) for -mcpu 
switch"", .base = 0, .arc = 1, .arc_0 = 1, .arc_1 = 1, .arc_2 = 1, .arc_3 = 1, .arc_4 = 1, .arc_5 = 1, .arc_6 = 1, .arc_7 = 1, }; arc_backend_init(); }" GCC,arc,19,void arc_init_reg_tables(void) { int i; for (i = 0; i < ARC_MODE_CLASS_SIZE; i++) { arc_mode_class[i] = i % 2 ? 0 : 1; } } GCC,arc,20,"int arc_address_cost(int addr) { if (addr == 0) { return 0; } int cost = 0; int i = 0; while (i < 8) { if (addr & (1 << i)) { cost += 1; } i++; } return cost; } ``` This function takes an integer `addr` as input, and returns the cost of the addressing mode that contains `addr`. The cost is calculated by counting the number of bits that are set in `addr`. If `addr` is zero, the cost is zero. Here's an example of how you could use this function: ``` int main() { int addr = 0x1234; int cost = arc_address_cost(addr); printf(""The cost of the addressing mode that contains %#x is %d\n"", addr, cost); return 0; }" GCC,arc,21,"arc_assemble_integer (rtx x, int size, int aligned_p) { if (aligned_p) { fputs (""\t.word\t"", asm_out_file); output_addr_const (asm_out_file, x); fputs (""\n"", asm_out_file); } else { fputs (""\t.byte\t"", asm_out_file); output_addr_const (asm_out_file, x); fputs (""\n"", asm_out_file); } }" GCC,arc,22,"void arc_ccfsm_at_label(int num, const char *prefix) { arc_ccfsm_label_num = num; arc_ccfsm_label_prefix = prefix; if (num == arc_ccfsm_label_num && strcmp(prefix, arc_ccfsm_label_prefix) == 0) { arc_ccfsm_reset(); } } ``` This function takes two arguments: `num`, which is the label number, and `prefix`, which is the label prefix. It records the current label number and prefix, and if the label number and prefix match the target specific value `arc,3,""L"",0`, it resets the ccfsm machinery. You can call this function from the `targetm.asm_out.internal_label` function, which is the target-specific function for outputting internal labels. 
Here's an example of how you can modify the `targetm.asm_out.internal_label` function to call `arc_ccfsm_at_label`: ``` void targetm_asm_out_internal_label(int num, const char *prefix) { fprintf(asm_out_file, ""%s%d"", prefix, num); arc_ccfsm_at_label(num, prefix); }" GCC,arc,23,"arc_ccfsm_branch_deleted_p (rtx insn) { if (GET_CODE (insn) != JUMP_INSN) return false; rtx cond = XEXP (insn, 1); if (GET_CODE (cond) != EQ_EXPR) return false; rtx op0 = XEXP (cond, 0); rtx op1 = XEXP (cond, 1); if (GET_CODE (op0) != REG || GET_CODE (op1) != REG) return false; if (arc_target_specific_value (insn, 1, 2, 1, 0)) return true; return false; }" GCC,arc,24,arc_ccfsm_record_branch_deleted (rtx_insn *insn) { if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE) { rtx_insn *next_insn = NEXT_INSN (insn); while (next_insn && GET_CODE (next_insn) == INSN && GET_CODE (PATTERN (next_insn)) == SET && GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE) { next_insn = NEXT_INSN (next_insn); } if (next_insn && GET_CODE (next_insn) == JUMP_INSN) { arc_ccfsm_record_branch_deleted (next_insn); } } } GCC,arc,25,arc_compute_frame_size (int size) { int frame_size; frame_size = (size + 3) & ~3; frame_size += 4; return frame_size; } GCC,arc,26,tree arc_compute_function_type(tree decl) { static tree result; if (decl == NULL_TREE) { result = NULL_TREE; return result; } if (result == NULL_TREE) { result = decl->type; } return result; } GCC,arc,27,arc_delay_slots_for_epilogue (void) { if (arc_interrupt_handler_p ()) return 0; if (arc_frame_size () == 0) return 1; return 0; } GCC,arc,28,int arc_double_limm_p (const_double value) { if (value.is_zero()) return 1; if (value.is_negative()) return 0; if (value.is_positive()) return 1; return 0; } GCC,arc,29,"arc_eligible_for_epilogue_delay (rtx trial, int slot) { if (GET_CODE (trial) != INSN) return false; if (GET_MODE (trial) != QImode) return false; if (GET_CODE (PATTERN (trial)) != SET) return false; if (GET_CODE (SET_SRC (PATTERN (trial))) != REG) return false; if (GET_CODE (SET_DEST (PATTERN (trial))) != REG) return false; if (GET_CODE (SET_SRC (PATTERN (trial))) == STACK_POINTER_REGNUM || GET_CODE (SET_SRC (PATTERN (trial))) == FRAME_POINTER_REGNUM) return false; if (GET_CODE (SET_DEST (PATTERN (trial))) == STACK_POINTER_REGNUM || GET_CODE (SET_DEST (PATTERN (trial))) == FRAME_POINTER_REGNUM) return false; if (GET_CODE (SET_SRC (PATTERN (trial))) == CALL_SAVED_REGNUM) return false; return true; }" GCC,arc,30,"arc_external_libcall (rtx_insn *insn, rtx_insn *operands, int noperands) { const char *func_name = XSTR (operands[0], 0); const char *func_proto = XSTR (operands[1], 0); const char *func_ret_type = XSTR (operands[2], 0); const char *func_args = XSTR (operands[3], 0); fprintf (asm_out_file, ""\t.rename\t_%s, _%s%s\n"", func_name, func_name, func_ret_type); fprintf (asm_out_file, ""\t.rename\t_%s, _%s%s\n"", func_proto, func_proto, func_ret_type); fprintf (asm_out_file, ""\t.rename\t_%s, _%s%s\n"", func_args, func_args, func_ret_type); }" GCC,arc,31,"void arc_file_start(void) { const char *cpu_type = ""arc""; fprintf(stderr, ""/*\n * This file is generated by the ARC backend of the GCC compiler.\n */\n""); fprintf(stderr, ""/*\n * CPU type: %s\n */\n"", cpu_type); fprintf(stderr, ""\t.cpu %s\n"", cpu_type); }" GCC,arc,32," unsigned int lang_mask ATTRIBUTE_UNUSED) { switch (decoded->opt_index) { case OPT_mtune_: if (decoded->arg == ""arc"") { opts->x_target_flags |= MASK_ARC; return true; } break; 
case OPT_mbase_: if (decoded->arg == ""arc"") { opts->x_target_flags |= MASK_ARC_BASE; return true; } break; case OPT_march_: if (decoded->arg == ""arc"") { opts->x_target_flags |= MASK_ARC_ARCH; return true; } break; default: break; } return false; }" GCC,arc,33,void arc_init(void) { arc_set_target_specific_value(ARC_TARGET_SPECIFIC_VALUE); } GCC,arc,34,"static void arc_initialize_trampoline(void *trampoline, void *target) { uint32_t *trampoline_ptr = (uint32_t *)trampoline; uint32_t *target_ptr = (uint32_t *)target; int32_t offset = (int32_t)target_ptr - (int32_t)trampoline_ptr; trampoline_ptr[0] = (uint32_t)offset; trampoline_ptr[1] = (uint32_t)offset >> 16; trampoline_ptr[2] = TRAMPOLINE_ALIGNMENT; trampoline_ptr[3] = TRAMPOLINE_SIZE; }" GCC,arc,35,void arc_init_reg_tables(void) { int i; for (i = 0; i < ARC_MODE_CLASS_SIZE; i++) { arc_mode_class[i] = i % 2 ? 0 : 1; } } GCC,arc,36,"arc_internal_label (const char *prefix, int num) { fprintf (asm_out_file, ""\t.%s%d:\n"", prefix, num); }" GCC,arc,37,"void arc_output_function_prologue(void) { asm(""sub %s,%s,16\n"" ""BEGIN PROLOGUE %s vars= %d, regs= %d, args= %d, extra= %d\n"" ""sub %s,%s,%d\n"" ""st %s,[%s,%d]\n"" ""st %s,[%s]\n"" ""mov %s,%s\n"" ""END PROLOGUE\n"" : ""=r"" (stack_pointer), ""=r"" (frame_pointer) : ""r"" (arc_prologue), ""r"" (vars), ""r"" (regs), ""r"" (args), ""r"" (extra) : ""memory""); }" GCC,arc,38,"arc_print_operand_address (FILE *file, rtx x, int letter) { if (letter == 'm') { fprintf (file, ""%%st(""); output_addr_const (file, x); fprintf (file, "")""); } else { fprintf (file, ""%%st(""); output_addr_const (file, x); fprintf (file, "")""); } }" GCC,arc,39,"arc_return_in_memory (tree type, tree fn_decl) { if (TREE_CODE (type) == POINTER_TYPE || (TREE_CODE (type) == REFERENCE_TYPE && TREE_CODE (TREE_TYPE (type)) == POINTER_TYPE)) return true; if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) return true; if (TREE_CODE (type) == ARRAY_TYPE) return true; return false; }" GCC,arc,40,"bool arc_rtx_costs(rtx x, int *total) { *total = 0; if (rtx_constant_p(x)) { *total += 1; return true; } if (rtx_register_p(x)) { *total += 1; return true; } if (rtx_memory_p(x)) { *total += 16; return true; } if (rtx_call_p(x)) { *total += 1; return true; } if (rtx_jump_p(x)) { *total += 1; return true; } if (rtx_return_p(x)) { *total += 1; return true; } if (rtx_label_p(x)) { *total += 1; return true; }" GCC,arc,41,"void arc_save_restore(uint32_t *base_reg, uint32_t gmask, bool epilogue_p, bool sibling_thunk, uint32_t first_offset) { if (gmask) { uint32_t reg_num = 0; while (gmask) { if (gmask & (1 << reg_num)) { if (epilogue_p) { asm volatile(""mov %0, [%1, %2]"" : ""=r""(reg_num) : ""r""(base_reg), ""r""(reg_num)); } else { asm volatile(""mov [%0, %1], %2"" : ""=r""(base_reg), ""=r""(reg_num) : ""r""(reg_num)); } } reg_num++; } } if (base_reg) { if (epilogue_p) { asm volatile(""mov %0, [%1]"" : ""=r""(base_reg) : ""r""(base_reg)); } else { asm volatile(""mov [%0], %1"" : ""=r""(base_reg) : ""r""(base_reg)); } }" GCC,arc,42,"enum arc_cc_mode { ARC_CC_EQ, ARC_CC_NE, ARC_CC_LT, ARC_CC_LE, ARC_CC_GT, ARC_CC_GE, ARC_CC_UNORDERED, ARC_CC_ORDERED, }; static enum arc_cc_mode arc_select_cc_mode (enum rtx_code code, rtx op0) { switch (code) { case EQ: return ARC_CC_EQ; case NE: return ARC_CC_NE; case LT: return ARC_CC_LT; case LE: return ARC_CC_LE; case GT: return ARC_CC_GT; case GE: return ARC_CC_GE; case UNORDERED: return ARC_CC_UNORDERED; case ORDERED: return ARC_CC_ORDERED; default: gcc_unreachable (); } }" 
GCC,arc,43,"arc_setup_incoming_varargs (CUMULATIVE_ARGS *cum, tree type, enum machine_mode mode) { rtx block = gen_rtx_PARAM (VOIDmode, NULL_RTX, NULL_RTX, NULL_RTX); for (int i = 0; i < cum->nargs; i++) { rtx arg = cum->args[i]; if (arg->mode == mode && arg->type == type) { rtx mem = gen_rtx_MEM (mode, arg); emit_move_insn (mem, arg); } } cum->nargs = 0; cum->args = NULL; cum->nregs = 0; cum->regs = NULL; cum->nmem = 0; cum->mem = NULL; cum->nstack = 0; cum->stack = NULL; cum->nblock = 0; cum->block = block; }" GCC,arc,44,"call_address_operand (rtx op) { if (GET_CODE (op) == SYMBOL_REF) return true; if (GET_CODE (op) == LABEL_REF) return true; if (GET_CODE (op) == CONST) { rtx x = XEXP (op, 0); if (GET_CODE (x) == SYMBOL_REF) return true; if (GET_CODE (x) == LABEL_REF) return true; } return false; }" GCC,arc,45,"call_operand (rtx op, enum machine_mode mode, int reg) { if (mode == SImode) { if (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0x10000000 && INTVAL (op) < 0x20000000) { op = gen_rtx_PLUS (SImode, op, gen_rtx_REG (SImode, PC_REGNUM)); } } if (GET_CODE (op) == MEM) { rtx addr = XEXP (op, 0); rtx reg = gen_rtx_REG (SImode, reg); emit_move_insn (reg, addr); op = reg; } emit_call_insn (gen_rtx_CALL (mode, op, NULL_RTX, NULL_RTX)); }" GCC,arc,46,"const_sint32_operand (rtx op, machine_mode mode) { if (GET_CODE (op) != CONST_INT) return false; HOST_WIDE_INT val = INTVAL (op); if (val < -0x80000000 || val > 0x7fffffff) return false; return true; }" GCC,arc,47,"const_uint32_operand (rtx op, enum machine_mode mode) { if (GET_CODE (op) != CONST_INT) return 0; if (mode == DImode) { if (INTVAL (op) < 0 || INTVAL (op) > 0xffffffff) return 0; } else { if (INTVAL (op) < 0) return 0; } if (sizeof (HOST_WIDE_INT) > 4) { if (INTVAL (op) > 0xffffffff) return 0; } return 1; }" GCC,arc,48,"gen_compare_reg (rtx op0, rtx op1, rtx op2, rtx op3, rtx op4, rtx op5) { rtx cc_reg = gen_rtx_REG (CCmode, arc_61); rtx compare_insn = gen_rtx_SET (cc_reg, gen_rtx_COMPARE (CCmode, op0, op1)); emit_insn (compare_insn); return cc_reg; }" GCC,arc,49,int get_arc_condition_code(rtx comparison) { int index = -1; switch (comparison->code) { case EQ: index = 0; break; case NE: index = 1; break; case LT: index = 2; break; case LE: index = 3; break; case GT: index = 4; break; case GE: index = 5; break; case LTU: index = 6; break; case LEU: index = 7; break; case GTU: index = 8; break; case GEU: index = 9; break; default: index = -1; break; } return index; } GCC,arc,50,"load_update_operand (rtx op) { if (GET_CODE (op) != MEM) return false; if (GET_MODE (op) != QImode) return false; if (GET_CODE (XEXP (op, 0)) != REG) return false; if (REGNO (XEXP (op, 0)) != 0) return false; if (GET_CODE (XEXP (op, 1)) != CONST_INT) return false; if (INTVAL (XEXP (op, 1)) != 0) return false; if (GET_CODE (XEXP (op, 2)) != CONST_INT) return false; if (INTVAL (XEXP (op, 2)) != 1) return false; if (GET_CODE (XEXP (op, 3)) != CONST_INT) return false; if (INTVAL (XEXP (op, 3)) != 0) return false; if (GET_CODE (XEXP (op, 4)) != CONST_INT) return false; if (INTVAL (XEXP (op, 4)) != 1) return false; if (GET_CODE (XEXP (op, 5)) != CONST_INT) return false; if (INTVAL (XEXP (op, 5)) != 0) return false; if (GET_CODE (XEXP (op, 6)) != CONST_INT) return false; if (INTVAL (XEXP (op, 6)) != 1) return false; return true; }" GCC,arc,51,long_immediate_loadstore_operand (rtx op) { if (GET_CODE (op) != MEM) return false; if (GET_MODE (op) == QImode || GET_MODE (op) == HImode) return true; if (GET_MODE (op) == SImode) return false; if (GET_MODE (op) == DImode) 
return true; return false; } GCC,arc,52,bool long_immediate_operand(rtx op) { if (GET_CODE(op) == CONST_INT) { HOST_WIDE_INT val = INTVAL(op); if (val < 0 || val > 0x7fffffff) return true; } return false; } GCC,arc,53,"move_dest_operand (rtx op) { if (GET_CODE (op) == REG) return true; if (GET_CODE (op) == MEM) { rtx addr = XEXP (op, 0); if (GET_CODE (addr) == REG) return true; if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && GET_CODE (XEXP (addr, 1)) == CONST_INT) return true; } return false; } ``` This function first checks if `OP` is a register, and if so, returns `true`. If `OP` is not a register, it checks if it is a memory operand, and if so, it checks if the address of the memory operand is a register or a sum of a register and a constant integer. If the address is a register or a sum of a register and a constant integer, the function returns `true`. Otherwise, the function returns `false`. You can use this function in your GCC backend to determine if an operand is an acceptable argument for a move destination. For example, you can use it in the `arc_expand_move` function to determine if the destination operand is acceptable: ``` rtx arc_expand_move (rtx dest, rtx src) { if (!move_dest_operand (dest)) return NULL_RTX; }" GCC,arc,54,move_double_src_operand (rtx op) { if (GET_CODE (op) != REG) return false; if (GET_MODE (op) != DImode) return false; if (GET_MODE_SIZE (GET_MODE (op)) != 8) return false; if (GET_MODE_CLASS (GET_MODE (op)) != MODE_INT) return false; if (GET_CODE (op) == CONST_INT) return false; return true; } GCC,arc,55,move_src_operand (rtx op) { if (GET_CODE (op) == REG) return true; else if (GET_CODE (op) == MEM) return true; else if (GET_CODE (op) == CONST_INT) return true; else if (GET_CODE (op) == CONST_DOUBLE) return true; else if (GET_CODE (op) == SYMBOL_REF) return true; else if (GET_CODE (op) == LABEL_REF) return true; else return false; } GCC,arc,56,bool nonvol_nonimm_operand(rtx op) { if (GET_CODE(op) == MEM && GET_MODE(op) == QImode) { return true; } else if (GET_CODE(op) == REG && GET_MODE(op) == QImode) { return true; } else { return false; } } GCC,arc,57,"output_shift (rtx x, rtx y, rtx z, int shift) { if (shift == 0) return; if (shift < 0) { emit_insn (gen_rtx_SET (VOIDmode, x, gen_rtx_ASHIFT (SImode, y, GEN_INT (-shift)))); } else { emit_insn (gen_rtx_SET (VOIDmode, x, gen_rtx_ASHIFTRT (SImode, y, GEN_INT (shift)))); } }" GCC,arc,58,"int proper_comparison_operator(int op, int cc) { if (op == ARC_CC_EQ || op == ARC_CC_NE || op == ARC_CC_LT || op == ARC_CC_GT || op == ARC_CC_LE || op == ARC_CC_GE) { if (op == ARC_CC_EQ || op == ARC_CC_NE) { if (cc & ARC_CC_Z) { return 1; } } else if (op == ARC_CC_LT || op == ARC_CC_GT) { if (cc & ARC_CC_N) { return 1; } } else if (op == ARC_CC_LE || op == ARC_CC_GE) { if (cc & ARC_CC_Z && cc & ARC_CC_N) { return 1; } } } return 0; }" GCC,arc,59,void record_cc_ref(struct arc_cc_ref *ref) { ref->target_specific_value = 0x10000000; } GCC,arc,60,"int shift_operator(int op) { switch (op) { case arc,1,0: return 1; default: return 0; } } ``` This function takes an integer `op` as input, and returns 1 if `op` is equal to `arc,1,0`, which is the target specific value for the shift operator in the ARC backend of the GCC compiler. Otherwise, it returns 0. 
You can use this function in your code to check if an operator is a shift operator, like this: ``` int result = shift_operator(op); if (result == 1) { } else { }" GCC,arc,61,"short_immediate_operand (rtx op) { if (GET_CODE (op) == CONST_INT) { HOST_WIDE_INT val = INTVAL (op); if (val >= -128 && val <= 127) return true; } return false; } ``` This function uses the `GET_CODE` and `INTVAL` macros to extract the code and value of the operand, respectively. If the operand is a constant integer, the function checks if its value is within the range of -128 to 127, which is the range of short immediate values for the ARC backend. If the value is within this range, the function returns true, otherwise it returns false. You can use this function in your GCC backend to determine if an operand is a short immediate value. For example, you can use it in the `emit_insn` function to determine if an instruction operand is a short immediate value, and if so, use the appropriate instruction encoding for the ARC backend. ``` void emit_insn (rtx insn) { rtx op0 = XEXP (insn, 0); rtx op1 = XEXP (insn, 1); if (short_immediate_operand (op0)) { } else { } }" GCC,arc,62,bool store_update_operand(operand *op) { if (op->type != OP_STORE) return false; if (op->arc.target_specific_value != 0x00000001) return false; return true; } GCC,arc,63,"int symbolic_memory_operand(rtx op) { if (GET_CODE(op) == MEM) { rtx addr = XEXP(op, 0); if (GET_CODE(addr) == SYMBOL_REF) { const char *name = XSTR(addr, 0); if (strcmp(name, ""arc"") == 0) { return 1; } } } return 0; }" GCC,arc,64,"int symbolic_operand(rtx x) { if (GET_CODE(x) == SYMBOL_REF) { const char *name = XSTR(x, 0); if (strcmp(name, ""arc"") == 0 || strcmp(name, ""1"") == 0 || strcmp(name, ""0"") == 0) return 1; } return 0; }" GCC,arc,65,int arc600_corereg_hazard(int insn) { int length = 0; if (insn & 0x10000) { if ((insn & 0x0000000F) >= 32) { if ((insn & 0x0000000F) == (insn & 0x0000000F)) { length = 4; } } } return length; } GCC,arc,66,int arc_address_cost(int addr) { int cost = 0; switch (addr) { case 0: cost = 1; break; case 1: cost = 2; break; case 2: cost = 3; break; case 3: cost = 4; break; case 4: cost = 5; break; case 5: cost = 6; break; case 6: cost = 7; break; case 7: cost = 8; break; case 8: cost = 9; break; case 9: cost = 10; break; case 10: cost = 11; break; case 11: cost = 12; break; case 12: cost = 13; break; case 13: cost = 14; break; case 14: cost = 15; break; case 15: cost = 16; break; default: cost = 0; break; } return cost; } GCC,arc,67,"int arc_adjust_insn_length (rtx_insn *insn) { if (GET_CODE (insn) == SET && (GET_CODE (SET_SRC (insn)) == MEM || GET_CODE (SET_DEST (insn)) == MEM)) { if (GET_CODE (SET_SRC (insn)) == MEM && GET_CODE (XEXP (SET_SRC (insn), 0)) != REG) { return 4; } else if (GET_CODE (SET_DEST (insn)) == MEM && GET_CODE (XEXP (SET_DEST (insn), 0)) != REG) { return 4; } } return 0; }" GCC,arc,68," int byte_align, int byte_reverse) { if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; } if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; } if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; } if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); 
return; }" GCC,arc,69," int align) { if (TREE_CODE (decl) == VAR_DECL && DECL_INITIAL (decl) && DECL_INITIAL (decl) != error_mark_node) { fprintf (stream, ""\t.sbss\t%s, %d, %d\n"", name, size, align); output_asm_insn (""\t.align\t%d"", align); output_asm_insn (""\t.type\t%s, @object"", name); output_asm_insn (""\t.size\t%s, %d"", name, size); } else { fprintf (stream, ""\t.bss\t%s, %d, %d\n"", name, size, align); output_asm_insn (""\t.align\t%d"", align); output_asm_insn (""\t.type\t%s, @object"", name); output_asm_insn (""\t.size\t%s, %d"", name, size); } }" GCC,arc,70,"int arc_attr_type(rtx insn) { if (GET_CODE(insn) == INSN) { if (GET_ATTR(insn, ""type"") == NULL) return -1; else return GET_ATTR(insn, ""type"")->value; } else return -1; }" GCC,arc,71,"bool arc_can_eliminate(int from, int to) { if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) { return true; } if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) { return false; } return true; }" GCC,arc,72, GCC,arc,73,arc_can_use_doloop_p (void) { if (TARGET_ARC) return true; if (TARGET_ARC && TARGET_ARC_1 && TARGET_ARC_2 && TARGET_ARC_3) return true; if (TARGET_ARC && TARGET_ARC_1 && TARGET_ARC_2 && TARGET_ARC_3 && TARGET_ARC_4 && TARGET_ARC_5 && TARGET_ARC_6) return true; return false; } GCC,arc,74,"void arc_ccfsm_post_advance (rtx insn, rtx state) { if (GET_CODE (insn) == INSN && GET_CODE (state) == REG) { int arc_target_specific_value = 0; if (GET_MODE (state) == QImode) arc_target_specific_value = 5; else if (GET_MODE (state) == HImode) arc_target_specific_value = 1; else if (GET_MODE (state) == SImode) arc_target_specific_value = 0; else if (GET_MODE (state) == DImode) arc_target_specific_value = 1; if (arc_target_specific_value != 0) { rtx new_state = gen_rtx_PLUS (GET_MODE (state), state, GEN_INT (arc_target_specific_value)); emit_insn (new_state); } } }" GCC,arc,75,arc_compute_frame_size (int size) { int frame_size; frame_size = arc_target_specific_value[31] * 4; frame_size += size; frame_size = (frame_size + 3) & ~3; return frame_size; } GCC,arc,76,tree arc_compute_function_type (tree decl) { static tree result; if (decl == NULL_TREE) { result = NULL_TREE; return result; } if (result == NULL_TREE) { result = decl; return result; } tree type = TREE_TYPE (decl); if (type == NULL_TREE) { result = NULL_TREE; return result; } if (TREE_CODE (type) == FUNCTION_TYPE) { result = type; return result; } if (TREE_CODE (type) == ARRAY_TYPE) { result = TREE_TYPE (type); return result; } if (TREE_CODE (type) == POINTER_TYPE) { result = TREE_TYPE (type); return result; } result = NULL_TREE; return result; } GCC,arc,77,"int arc_compute_millicode_save_restore_regs(struct gcc_frame *frame, unsigned int gmask) { int start_reg = 13; int end_reg = 25; int num_regs = 0; for (int i = start_reg; i <= end_reg; i++) { if (gmask & (1 << i)) { num_regs++; } } frame->millicode_start_reg = start_reg; frame->millicode_end_reg = end_reg; return num_regs; }" GCC,arc,78,unsigned int arc_decl_pretend_args(tree decl) { unsigned int size = 0; for (tree param = DECL_ARGUMENTS(decl); param; param = TREE_CHAIN(param)) { if (TREE_CODE(TREE_TYPE(param)) == POINTER_TYPE || TREE_CODE(TREE_TYPE(param)) == REFERENCE_TYPE) { size += TREE_TYPE(param)->size; } } return size; } GCC,arc,79,"arc_encode_section_info (tree decl, rtx x, int first) { if (TREE_CODE (decl) == FUNCTION_DECL) { if (first) { if (DECL_SECTION_NAME (decl)) { rtx section_name = gen_rtx_SYMBOL_REF (Pmode, DECL_SECTION_NAME (decl)); x = gen_rtx_MEM (BLKmode, section_name); } else { x = 
gen_rtx_MEM (BLKmode, XEXP (x, 0)); } } else { x = gen_rtx_MEM (BLKmode, XEXP (x, 0)); } }" GCC,arc,80,"arc_epilogue_uses (int regno) { if (regno == arc_return_address_regs[arc_compute_function_type (cfun)]) return true; if (regno == ARC_BLINK_REGNUM && arc_isr_clobber_blink (cfun)) return true; if (arc_is_interrupt_function (cfun) && arc_live_in_reg (regno, cfun)) return true; return false; }" GCC,arc,81, GCC,arc,82,void arc_expand_prologue(struct gcc_target_specific_value *target_specific_value) { target_specific_value->stack_pointer = 0; target_specific_value->frame_pointer = 0; target_specific_value->target_specific_value = 32; target_specific_value->target_specific_value_arc = 0; target_specific_value->target_specific_value_arc_0 = 0; target_specific_value->target_specific_value_arc_0_0 = 0; target_specific_value->target_specific_value_arc_0_0_0 = 0; target_specific_value->target_specific_value_arc_0_0_0_0 = 0; target_specific_value->target_specific_value_arc_0_0_0_0_0 = 0; } GCC,arc,83,"arc_finalize_pic (void) { if (TREE_CODE (current_function_decl) == FUNCTION_DECL && DECL_GOTOFF_RELOCATIONS (current_function_decl)) { rtx insn; rtx basereg = gen_rtx_REG (Pmode, GOTBASE_REGNUM); rtx pc = gen_rtx_REG (Pmode, PC_REGNUM); rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, gen_rtx_SYMREF (Pmode, ""_DYNAMIC"")), 3); rtx plus = gen_rtx_PLUS (Pmode, pc, unspec); rtx set = gen_rtx_SET (basereg, plus); insn = emit_insn (set); RTX_FRAME_RELATED_P (insn) = 1; } }" GCC,arc,84, GCC,arc,85,"void arc_function_arg(uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4) { }" GCC,arc,86,"void arc_function_arg_advance(uint32_t *cum, uint32_t last_arg_reg) { uint32_t reg_number = *cum; while (reg_number <= last_arg_reg) { reg_number++; } *cum = reg_number; }" GCC,arc,87, GCC,arc,88,arc_get_unalign (void) { return cfun->machine->unalign; } GCC,arc,89," int flags, bool *no_add_attrs) { tree arg = TREE_VALUE (args); if (TREE_CODE (arg) != STRING_CST) { error (""argument of %qE attribute is not a string constant"", name); *no_add_attrs = true; return; } const char *str = TREE_STRING_POINTER (arg); if (strcmp (str, ""ilink1"") != 0 && strcmp (str, ""ilink2"") != 0) { error (""argument of %qE attribute is not \""ilink1\"" or \""ilink2\"""", name); *no_add_attrs = true; return; } DECL_ATTRIBUTES (node) = tree_cons (name, args, DECL_ATTRIBUTES (node)); }" GCC,arc,90,"void arc_hazard(uint32_t reg, uint32_t val) { if (reg >= 32) { uint32_t nop = 0; asm volatile(""nop"" : ""=r""(nop) : ""r""(reg), ""r""(val)); } }" GCC,arc,91,void arc_init(void) { } GCC,arc,92, GCC,arc,93,"arc_initial_elimination_offset (rtx_insn *insn, int regno) { int offset = insn->address - arc_backend_get_routine_start_address (insn); if (regno != arc_backend_get_replacement_register (insn)) offset += arc_backend_get_register_size (insn, regno) - arc_backend_get_register_size (insn, arc_backend_get_replacement_register (insn)); return offset; }" GCC,arc,94,"void arc_init_machine_status(struct machine_function *mf) { mf->target_specific_value = arc,1; }" GCC,arc,95,void arc_init_reg_tables(void) { uint32_t i; for (i = 0; i < 32; i++) { arc_mode_class[i] = (i == 0) ? 4 : 1; } for (i = 0; i < 32; i++) { arc_reg_class[i] = (i == 0) ? 
1 : 0; } } GCC,arc,96,"arc_in_small_data_p (tree decl) { if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != NULL && !strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)), "".sdata"")) return true; if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != NULL && !strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)), "".sbss"")) return true; return false; }" GCC,arc,97,"arc_label_align (rtx_insn *label, int align) { if (INSN_ALIGN (label) >= align) return; INSN_ALIGN (label) = align; rtx_insn *insn; for (insn = NEXT_INSN (label); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { INSN_ALIGN (insn) = align; break; } } }" GCC,arc,98,arc_legitimate_constant_p (rtx x) { if (!CONSTANT_P (x)) return false; if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0x100000001) return true; return false; } GCC,arc,99,"arc_legitimate_pc_offset_p (rtx addr) { if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && REGNO (XEXP (addr, 0)) == PC_REGNUM && GET_CODE (XEXP (addr, 1)) == CONST_INT) return true; return false; } ``` This function uses the `GET_CODE` and `GET_MODE` macros to check the type of the `addr` expression and its operands. If the `addr` expression is a `PLUS` expression with a `REG` operand that is the `PC` register, and a `CONST_INT` operand, then the function returns `true`. Otherwise, it returns `false`. You can use this function in the `arc_legitimate_address_p` function to check if an address is a legitimate address for the `arc` backend of GCC. ``` bool arc_legitimate_address_p (rtx addr, machine_mode mode, int strict) { if (arc_legitimate_pc_offset_p (addr)) return true; return false; }" GCC,arc,100, GCC,arc,101,"int arc_legitimate_pic_operand_p (rtx x, machine_mode mode) { if (GET_CODE (x) == CONST_INT) { return 1; } else if (GET_CODE (x) == CONST_DOUBLE) { return (INTVAL (x) >= -2147483648 && INTVAL (x) <= 2147483647); } else if (GET_CODE (x) == SYMBOL_REF) { return (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_PIC) != 0; } else if (GET_CODE (x) == LABEL_REF) { return (LABEL_REF_FLAGS (x) & LABEL_FLAG_PIC) != 0; } else { return 0; } }" GCC,arc,102," int *win) { rtx base, index, disp; HOST_WIDE_INT offset; if (GET_CODE (x) == PLUS) { base = XEXP (x, 0); index = XEXP (x, 1); disp = NULL_RTX; offset = 0; } else if (GET_CODE (x) == MULT) { base = XEXP (x, 0); index = XEXP (x, 1); disp = NULL_RTX; offset = 0; } else if (GET_CODE (x) == CONST_INT) { base = NULL_RTX; index = NULL_RTX; disp = x; offset = INTVAL (x); } else { base = NULL_RTX; index = NULL_RTX; disp = NULL_RTX; offset = 0; } if (base == NULL_RTX && index == NULL_RTX && disp == NULL_RTX) { *win = 0; return NULL_RTX; } if (base != NULL_RTX && GET_CODE (base) != REG) { *win = 0; return NULL_RTX; }" GCC,arc,103,"arc_lra_p (void) { return TARGET_LRA; } ``` This function simply returns the value of the `TARGET_LRA` macro, which is defined in the ARC target configuration file. If the `TARGET_LRA` macro is defined, then the LRA pass is used instead of the reload pass. You can use this function in your GCC plugin to determine whether the LRA pass is used for the ARC target. 
For example: ``` if (arc_lra_p()) { } else { }" GCC,arc,104,"arc_mode_dependent_address_p (rtx addr) { if (!legitimate_address_p (addr)) return false; if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && GET_CODE (XEXP (addr, 1)) == CONST_INT) { if (INTVAL (XEXP (addr, 1)) % GET_MODE_SIZE (GET_MODE (addr)) == 0) return true; } return false; }" GCC,arc,105,"arc_next_active_insn (rtx insn) { while (insn) { if (GET_CODE (insn) == ADDR_VEC || GET_CODE (insn) == DIFF_VEC) return NULL_RTX; if (GET_CODE (insn) == SEQUENCE) { rtx next = XEXP (insn, 1); if (next && GET_CODE (next) == INSN) return next; } insn = NEXT_INSN (insn); } return NULL_RTX; }" GCC,arc,106,"int arc_output_addsi (rtx *operands, int cond_p, int output_p) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; if (cond_p) { emit_insn (gen_rtx_COND_EXEC (VOIDmode, gen_rtx_EQ (VOIDmode, op0, op1), gen_rtx_ADD (VOIDmode, op0, op2))); return 0; } if (output_p) { emit_insn (gen_rtx_ADD (VOIDmode, op0, op2)); return 4; } return 0; }" GCC,arc,107,"int arc_output_commutative_cond_exec (rtx_insn *insn, bool output_p) { rtx operands[3]; int length = 0; operands[0] = XEXP (insn, 0); operands[1] = XEXP (insn, 1); operands[2] = XEXP (insn, 2); if (output_p) { switch (GET_CODE (operands[2])) { case BMSK: length = 2; output_asm_insn (""bmsk%? %0,%1,%Z2"", operands); break; case BCLR: length = 2; output_asm_insn (""bclr%? %0,%1,%M2"", operands); break; case BIC: length = 2; output_asm_insn (""bic%? %0,%1,%n2-1"", operands); break; case BSET: length = 2; output_asm_insn (""bset%? %0,%1,%z2"", operands); break; case BXOR: length = 2; output_asm_insn (""bxor%? %0,%1,%z2"", operands); break; default: gcc_unreachable (); } } return length; }" GCC,arc,108,"void arc_output_libcall(const char *fname, int argc, char **argv) { printf(""arc,64,35,\""add r12,pcl,@%s-(.&-4)\n\tjl%%!%%* [r12]\"",\""jl%%! 
@%s\"",\""bl%%!%%* @%s\""\n"", fname, argv[0], argv[1]); }" GCC,arc,109,"void arc_output_mi_thunk (const char *arc, int delta, int function) { fprintf(arc, ""\tld\t%s, [%s]\n"", arc, delta); fprintf(arc, ""\tadd\t%s, %s, %s\n"", arc, delta, function); fprintf(arc, ""\tj\t%s\n"", function); }" GCC,arc,110,void arc_override_options(uint32_t *options) { uint32_t arc_options = 0; if (options & ARC_OPTION_ARC) { arc_options |= ARC_OPTION_ARC; } if (options & ARC_OPTION_3) { arc_options |= ARC_OPTION_3; } if (options & ARC_OPTION_255) { arc_options |= ARC_OPTION_255; } if (options & ARC_OPTION_1) { arc_options |= ARC_OPTION_1; } if (options & ARC_OPTION_0) { arc_options |= ARC_OPTION_0; } if (arc_options & ARC_OPTION_ARC) { options |= ARC_OPTION_ARC; } if (arc_options & ARC_OPTION_3) { options |= ARC_OPTION_3; } GCC,arc,111,arc_pad_return (rtx_insn *insn) { if (arc_call_return_p (insn)) { if (arc_call_p (insn)) { emit_insn (gen_nop_s ()); } else { emit_insn (gen_nop ()); } } } GCC,arc,112,"void arc_pass_by_reference(void *arg, size_t size) { if (size > 8) { void *ref = arg; __asm__ volatile(""mov %0, %1"" : ""=r""(ref) : ""r""(arg)); } else { __asm__ volatile(""mov %0, %1"" : ""=r""(arg) : ""r""(arg)); } }" GCC,arc,113,"void arc_print_operand_address (rtx operand, int code) { if (code == 0) { fprintf (asm_out_file, ""0x%08x"", operand); } else { fprintf (asm_out_file, ""%s"", XSTR (operand, 0)); } }" GCC,arc,114,"void arc_process_double_reg_moves(uint32_t *dest, uint32_t *src) { uint32_t temp; if (dest[0] == 40 && dest[1] == 44 && src[0] == 40 && src[1] == 44) { temp = src[2]; dest[2] = temp; } else if (dest[0] == 44 && dest[1] == 40 && src[0] == 44 && src[1] == 40) { temp = src[2]; dest[2] = temp; } }" GCC,arc,115,bool arc_profile_call(tree callee) { emit_profile_call(callee); return true; } GCC,arc,116,"arc_raw_symbolic_reference_mentioned_p (rtx op, bool skip_local) { if (GET_CODE (op) == SYMBOL_REF) { if (skip_local && SYMBOL_REF_LOCAL_P (op)) return false; return true; } else if (GET_CODE (op) == UNSPEC) { if (XINT (op, 1) == UNSPEC_ARC) return false; return true; } return false; }" GCC,arc,117,"arc_register_move_cost (rtx op, enum machine_mode mode) { int cost = 0; switch (mode) { case QImode: cost = 1; break; case HImode: cost = 2; break; case SImode: cost = 3; break; case DImode: cost = 4; break; case TImode: cost = 5; break; default: gcc_unreachable (); } return cost; }" GCC,arc,118,"arc_return_in_memory (tree type, tree fn_decl) { if (TREE_CODE (type) == POINTER_TYPE || (TREE_CODE (type) == REFERENCE_TYPE && TREE_CODE (TREE_TYPE (type)) == POINTER_TYPE)) return true; if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) return true; if (TREE_CODE (type) == ARRAY_TYPE) return true; return false; }" GCC,arc,119,int arc_return_slot_offset(void) { int offset = -1; if (arc_return_address_on_stack()) { offset = arc_return_address_offset(); } return offset; } GCC,arc,120,"arc_rewrite_small_data (rtx op) { if (GET_CODE (op) == MEM && GET_MODE (op) == QImode) { rtx addr = XEXP (op, 0); if (GET_CODE (addr) == SYMBOL_REF) { const char *name = XSTR (addr, 0); if (strncmp (name, ""__small_data_"", 12) == 0) { rtx new_addr = gen_rtx_MEM (QImode, gen_rtx_SYMBOL_REF (Pmode, name + 12)); new_addr = gen_rtx_PLUS (Pmode, new_addr, gen_rtx_SYMBOL_REF (Pmode, ""__small_data_base"")); emit_insn (gen_rtx_SET (VOIDmode, op, new_addr)); } } } }" GCC,arc,121,"bool arc_rewrite_small_data_p (rtx x) { if (GET_CODE (x) == SYMBOL_REF) return true; if (GET_CODE (x) == LABEL_REF) return true; if 
(GET_CODE (x) == CONST) { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); if (GET_CODE (op0) == SYMBOL_REF && GET_CODE (op1) == CONST_INT) return true; if (GET_CODE (op0) == LABEL_REF && GET_CODE (op1) == CONST_INT) return true; } return false; }" GCC,arc,122," rtx_insn *reload_insn) { rtx_insn *arc_insn; rtx_insn *arc_secondary_insn; rtx_insn *arc_reload_insn; arc_insn = insn; arc_secondary_insn = secondary_insn; arc_reload_insn = reload_insn; if (arc_insn->mode_dependent_address_p) { arc_secondary_insn = gen_rtx_INSN (VOIDmode, arc_insn, arc_secondary_insn, arc_reload_insn); arc_secondary_insn->mode_dependent_address_p = true; arc_secondary_insn->secondary_p = true; arc_secondary_insn->reload_p = true; emit_insn (arc_secondary_insn); } }" GCC,arc,123,"arc_select_cc_mode (enum rtx_code code, rtx op0) { switch (code) { case EQ: return CCmode; case NE: return CCmode; case GT: return CCmode; case GE: return CCmode; case LT: return CCmode; case LE: return CCmode; case GTU: return CCmode; case GEU: return CCmode; case LTU: return CCmode; case LEU: return CCmode; default: return CCmode; } }" GCC,arc,124,"arc_setup_incoming_varargs (CUMULATIVE_ARGS *cum, tree type, enum machine_mode mode) { tree block = create_block (type, mode); for (int i = 0; i < cum->nargs; i++) { tree arg = cum->args[i]; if (arg->type == type && arg->mode == mode) { tree mem = create_memory_reference (block, arg->type, arg->mode); emit_move_insn (mem, arg); } } cum->nargs = 0; cum->args = NULL; }" GCC,arc,125,void arc_set_default_type_attributes(tree type) { type->attributes = 0; type->attributes |= TARGET_ARC_ATTRIBUTE_ARC; type->alignment = 0; type->size = 0; type->offset = 0; } GCC,arc,126,"arc_trampoline_adjust_address (rtx insn, rtx operand, int *adjust) { if (GET_CODE (operand) == SYMBOL_REF) { const char *name = XSTR (operand, 0); if (strncmp (name, ""arc,2"", 5) == 0) { *adjust = 1; } } } ``` This function takes three arguments: `insn`, which is the instruction that is being processed; `operand`, which is the operand of the instruction that is being adjusted; and `adjust`, which is a pointer to an integer that will be set to 1 if the address needs to be adjusted. The function first checks if the operand is a symbol reference. If it is, it gets the name of the symbol and checks if it starts with ""arc,2"". If it does, it sets the `adjust` pointer to 1, indicating that the address needs to be adjusted. You can then use this function in the ARC backend of GCC to adjust the addresses of trampolines. For example, you could call it in the `arc_adjust_address` function, which is called by the `arc_expand_insn` function to adjust the addresses of instructions: ``` static void arc_adjust_address (rtx insn, rtx operand, int *adjust) { if (GET_CODE (operand) == SYMBOL_REF) { const char *name = XSTR (operand, 0); if (strncmp (name, ""arc,2"", 5) == 0) { *adjust = 1; } }" GCC,arc,127,"arc_use_by_pieces_infrastructure_p (void) { return true; } ``` This function returns `true` to indicate that the ARC backend should use the ""by pieces"" infrastructure for code generation. You can also add a target-specific value to the function, as you mentioned in your question. 
Here's an example of how you could do that: ``` static bool arc_use_by_pieces_infrastructure_p (void) { return true; }" GCC,arc,128,arc_vector_mode_supported_p (machine_mode mode) { if (mode == VOIDmode) return false; switch (mode) { case QImode: case HImode: case SImode: case DImode: case TImode: case OImode: case XImode: return true; default: return false; } } GCC,arc,129,"int arc_verify_short(rtx insn, int unalign, int check_attr) { if (check_attr > 0) { if (GET_CODE(insn) == INSN && GET_MODE(insn) == QImode) { return 1; } } else if (unalign == 0) { if (GET_CODE(insn) == INSN && GET_MODE(insn) == QImode) { return 1; } } else { return 0; } }" GCC,arc,130,"int branch_dest(rtx_insn *insn) { rtx_insn *dest; dest = get_attr_dest(insn); if (dest && GET_CODE(dest) == CODE_LABEL) { dest = XEXP(dest, 0); } return dest; } ``` This function takes an `rtx_insn` pointer as an argument, which represents the branch instruction. It then uses the `get_attr_dest` function to get the destination address of the branch instruction. If the destination address is a branch instruction, it gets the address of the branch instruction using the `XEXP` function. Finally, it returns the destination address of the branch instruction. You can use this function in your ARC backend of the GCC compiler to get the destination address of a branch instruction. For example, you can call this function in the `arc_expand_branch` function to get the destination address of a branch instruction and then use it to generate the appropriate branch instruction. Here is an example of how you can use this function in the `arc_expand_branch` function: ``` void arc_expand_branch(rtx_insn *insn) { int dest; dest = branch_dest(insn); if (dest) { emit_jump_insn(gen_jump(dest)); } else { emit_jump_insn(gen_jump(insn)); } }" GCC,arc,131,"bool check_if_valid_regno_const(int opno) { if (opno >= 0 && opno < 16) { return true; } else { error(""arc: register number must be a compile-time constant. 
Try giving higher optimization levels""); return false; } }" GCC,arc,132,"check_if_valid_sleep_operand (tree operand) { if (TREE_CODE (operand) != INTEGER_CST) return false; if (TREE_UNSIGNED (operand) && TREE_INT_CST_LOW (operand) < 64) return true; error (""operand for __builtin_arc_sleep must be an unsigned 6-bit compile-time constant""); return false; }" GCC,arc,133," int unsignedp, int volatilep) { if (GET_CODE (op) == MEM) { rtx addr = XEXP (op, 0); if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && REGNO (XEXP (addr, 0)) == ARC_SDA_BASE_REGNUM && GET_CODE (XEXP (addr, 1)) == CONST_INT) { HOST_WIDE_INT offset = INTVAL (XEXP (addr, 1)); if (offset >= 0 && offset < 65536) { op = gen_rtx_MEM (mode, gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, ARC_SDA_BASE_REGNUM), GEN_INT (offset))); return op; } } } return NULL_RTX; }" GCC,arc,134,"conditionalize_nonjump (rtx insn, rtx cond, bool annulled) { rtx pat = PATTERN (insn); rtx new_pat; if (annulled) { new_pat = gen_rtx_COND (VOIDmode, cond, gen_rtx_ANNUL (VOIDmode, pat), gen_rtx_ANNUL (VOIDmode, pat)); } else { new_pat = gen_rtx_COND (VOIDmode, cond, gen_rtx_ANNUL (VOIDmode, pat), pat); } return new_pat; }" GCC,arc,135,"emit_pic_move (rtx *operands, int noperands) { int i; for (i = 1; i < noperands; i++) { rtx src = operands[i]; rtx dest = operands[0]; if (REG_P (src)) { emit_insn (gen_rtx_SET (VOIDmode, dest, src)); } else if (MEM_P (src)) { emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_MEM (GET_MODE (dest), src))); } else if (CONSTANT_P (src)) { emit_insn (gen_rtx_SET (VOIDmode, dest, src)); } } }" GCC,arc,136,"void emit_store_direct(tree block, tree offset, tree value) { tree addr = build_unary_op(ADDR_EXPR, block, 0); addr = build_binary_op(PLUS_EXPR, addr, offset); emit_move_insn(addr, value); }" GCC,arc,137,"uint32_t force_offsettable(uint32_t addr, uint32_t size, bool reuse) { uint32_t offset = 0; uint32_t mask = ARC_TARGET_SPECIFIC_VALUE; if (reuse) { offset = addr & mask; addr = addr - offset; } asm volatile ( ""mov %0, %1\n"" ""and %2, %3\n"" ""add %0, %4\n"" : ""=r"" (addr) : ""r"" (addr), ""r"" (mask), ""r"" (size), ""r"" (offset) : ""memory"" ); return addr; }" GCC,arc,138,"frame_move (rtx src, rtx dst) { rtx insn = emit_insn (gen_frame_move (dst, src)); RTX_FRAME_RELATED_P (insn) = 1; }" GCC,arc,139,"gen_mhi (void) { REG r; r = gen_rtx_REG (SImode, arc_mhi_regnum); return r; }" GCC,arc,140,"gen_mlo (void) { rtx mlo = gen_rtx_REG (SImode, arc_mlo_regnum); return mlo; }" GCC,arc,141,"static const char *arc_condition_codes[] = { ""eq"", ""ne"", ""cs"", ""cc"", ""mi"", ""pl"", ""vs"", ""vc"", ""hi"", ""ls"", ""ge"", ""lt"", ""gt"", ""le"", ""al"", ""nv"" }; static int get_arc_condition_code (rtx comparison) { int index; if (comparison->code != CODE_COMPARE) return -1; index = comparison->arc_specific_value.code; if (index < 0 || index >= ARRAY_SIZE (arc_condition_codes)) return -1; return index; }" GCC,arc,142,"output_short_suffix (rtx insn) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == REG && GET_CODE (SET_DEST (PATTERN (insn))) == REG && GET_MODE (SET_SRC (PATTERN (insn))) == GET_MODE (SET_DEST (PATTERN (insn))) && GET_MODE_SIZE (GET_MODE (SET_SRC (PATTERN (insn)))) <= 2) { fputs (""_s"", asm_out_file); cfun->machine->unalign = 1; } }" GCC,arc,143,"bool prepare_extend_operands(rtx operands[], int code, int omode) { int i; if (code == 0) return false; for (i = 0; i < 8; i++) { if (operands[i] == NULL_RTX) continue; if (GET_CODE(operands[i]) == REG) { 
operands[i] = gen_rtx_REG(omode, REGNO(operands[i])); } else if (GET_CODE(operands[i]) == MEM) { operands[i] = gen_rtx_MEM(omode, XEXP(operands[i], 0)); } else { operands[i] = operands[i]; } } return true; }" GCC,arc,144,"prepare_move_operands (rtx operands[], machine_mode mode) { if (mode == QImode) { operands[0] = simplify_gen_subreg (QImode, operands[0], mode, 0); operands[1] = simplify_gen_subreg (QImode, operands[1], mode, 0); } else if (mode == HImode) { operands[0] = simplify_gen_subreg (HImode, operands[0], mode, 0); operands[1] = simplify_gen_subreg (HImode, operands[1], mode, 0); } else if (mode == SImode) { operands[0] = simplify_gen_subreg (SImode, operands[0], mode, 0); operands[1] = simplify_gen_subreg (SImode, operands[1], mode, 0); }" GCC,arc,145,"bool small_data_pattern(rtx op) { if (GET_CODE(op) == SYMBOL_REF) { const char *name = XSTR(op, 0); if (strcmp(name, ""arc"") == 0) { return true; } } return false; }" GCC,arc,146,unspec_prof_hash (const void *key) { const struct cgraph_node *node = (const struct cgraph_node *) key; const char *caller_name = node->caller->name; const char *callee_name = node->callee->name; hashval_t hash = 0; if (caller_name) hash = hash_string (caller_name); if (callee_name) hash = hash_string (callee_name); return hash; } GCC,arc,147,"unspec_prof_htab_eq (const_rtx x, const_rtx y) { if (x == y) return 1; if (GET_CODE (x) != UNSPEC_PROF || GET_CODE (y) != UNSPEC_PROF) return 0; if (XSTR (x, 0) != XSTR (y, 0)) return 0; if (XSTR (x, 1) != XSTR (y, 1)) return 0; if (XSTR (x, 2) != XSTR (y, 2)) return 0; if (XSTR (x, 3) != XSTR (y, 3)) return 0; if (XSTR (x, 4) != XSTR (y, 4)) return 0; if (XSTR (x, 5) != XSTR (y, 5)) return 0; if (XSTR (x, 6) != XSTR (y, 6)) return 0; if (XSTR (x, 7) != XSTR (y, 7)) return 0; if (XSTR (x, 8) != XSTR (y, 8)) return 0; if (XSTR (x, 9) != XSTR (y, 9)) return 0; return 1; }" GCC,arc,148, GCC,arc,149,tree arc_compute_function_type(tree decl) { static tree result; if (decl == NULL_TREE) { result = NULL_TREE; } else { result = decl->type; } return result; } GCC,arc,150,"arc_dwarf_register_span (rtx reg) { if (GET_CODE (reg) == PARALLEL) { int num_regs = XVECLEN (reg, 0); rtx *reg_pieces = XNEWVEC (rtx, num_regs); for (int i = 0; i < num_regs; i++) { rtx reg_piece = XEXP (XVECEXP (reg, 0, i), 0); reg_pieces[i] = reg_piece; } return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (num_regs, reg_pieces)); } else { return NULL_RTX; } }" GCC,arc,151,"void arc_expand_builtin_aligned(void *val, int alignval) { uintptr_t addr = (uintptr_t)val; uintptr_t aligned_addr = (addr + alignval - 1) & ~(alignval - 1); return (void *)aligned_addr; }" GCC,arc,152, GCC,arc,153,"arc_finalize_pic (void) { if (TREE_CODE (current_function_decl) == FUNCTION_DECL && DECL_GOTOFF_RELOCATIONS (current_function_decl)) { rtx insn; rtx basereg = gen_rtx_REG (Pmode, GOTBASE_REGNUM); rtx pc = gen_rtx_REG (Pmode, PC_REGNUM); rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, gen_rtx_SYMREF (Pmode, ""_DYNAMIC"")), 3); rtx plus = gen_rtx_PLUS (Pmode, pc, unspec); rtx set = gen_rtx_SET (basereg, plus); insn = emit_insn (set); RTX_FRAME_RELATED_P (insn) = 1; } }" GCC,arc,154," int flags, bool *no_add_attrs) { tree arg; if (TREE_CODE (node) != FUNCTION_DECL) return; arg = TREE_VALUE (args); if (TREE_CODE (arg) != STRING_CST) { error (""argument of %qE attribute is not a string constant"", name); return; } if (strcmp (TREE_STRING_POINTER (arg), ""ilink1"") == 0) { ... } else if (strcmp (TREE_STRING_POINTER (arg), ""ilink2"") == 0) { ... 
} else if (strcmp (TREE_STRING_POINTER (arg), ""ilink"") == 0) { ... } else { error (""argument of %qE attribute is not \""ilink1\"", \""ilink2\"", or \""ilink\"""", name); return; } DECL_ATTRIBUTES (node) = tree_cons (name, args, DECL_ATTRIBUTES (node)); }" GCC,arc,155,"void arc_hazard(uint32_t reg, uint32_t val) { if (reg >= 32) { asm volatile(""nop""); } asm volatile(""mov %0, %1"" : ""=r""(reg) : ""r""(val)); }" GCC,arc,156,void arc_init_reg_tables(void) { uint32_t i; for (i = 0; i < 32; i++) { arc_mode_class[i] = (i == 0) ? 4 : 1; } for (i = 0; i < 32; i++) { arc_reg_class[i] = (i == 0) ? 1 : 0; } } GCC,arc,157,void arc_override_options(uint32_t *options) { uint32_t arc_options = 0; if (options & ARC_OPTION_ARC) { arc_options |= ARC_OPTION_ARC; } if (options & ARC_OPTION_3) { arc_options |= ARC_OPTION_3; } if (options & ARC_OPTION_255) { arc_options |= ARC_OPTION_255; } if (options & ARC_OPTION_1) { arc_options |= ARC_OPTION_1; } if (options & ARC_OPTION_0) { arc_options |= ARC_OPTION_0; } if (arc_options & ARC_OPTION_ARC) { options |= ARC_OPTION_ARC; } if (arc_options & ARC_OPTION_3) { options |= ARC_OPTION_3; } GCC,arc,158,"void arc_print_operand_address (rtx operand, int code) { if (code == 0) { fprintf (asm_out_file, ""0x%08x"", operand); } else { fprintf (asm_out_file, ""%s"", XSTR (operand, 0)); } }" GCC,arc,159,"void arc_process_double_reg_moves(uint32_t *dest, uint32_t *src) { uint32_t temp; if (dest[0] == 40 && dest[1] == 44 && src[0] == 40 && src[1] == 44) { temp = src[2]; dest[2] = temp; } else if (dest[0] == 44 && dest[1] == 40 && src[0] == 44 && src[1] == 40) { temp = src[2]; dest[2] = temp; } }" GCC,arc,160, GCC,arc,161, GCC,arc,162,void arc_split_move(uint64_t operands[6]) { uint32_t operand0 = operands[0]; uint32_t operand1 = operands[1]; uint32_t operand2 = operands[2]; uint32_t operand3 = operands[3]; uint32_t operand4 = operands[4]; uint32_t operand5 = operands[5]; uint32_t move1 = (operand0 << 32) | (operand1 & 0xFFFFFFFF); uint32_t move2 = (operand2 << 32) | (operand3 & 0xFFFFFFFF); operands[0] = move1; operands[1] = move2; operands[2] = operand4; operands[3] = operand5; } GCC,arc,163,"emit_unlikely_jump (rtx_insn *insn) { rtx_insn *prev_insn = PREV_INSN (insn); if (prev_insn && GET_CODE (prev_insn) == JUMP_INSN) { rtx_insn *target_insn = JUMP_LABEL (prev_insn); if (target_insn && GET_CODE (target_insn) == CODE_LABEL) { rtx_insn *new_insn = emit_jump_insn_before (target_insn, prev_insn); new_insn->set_unlikely (); } } }" GCC,arc,164,"bool prepare_extend_operands(rtx operands[], int code, int omode) { int i; if (code == 0) return false; for (i = 0; i < 8; i++) { if (operands[i] == NULL_RTX) continue; if (GET_CODE(operands[i]) == REG) { operands[i] = gen_rtx_REG(omode, REGNO(operands[i])); } else if (GET_CODE(operands[i]) == MEM) { operands[i] = gen_rtx_MEM(omode, XEXP(operands[i], 0)); } else { operands[i] = operands[i]; } } return true; }" GCC,arc,165,"prepare_move_operands (rtx operands[], machine_mode mode) { if (mode == QImode) { operands[0] = simplify_gen_subreg (QImode, operands[0], mode, 0); operands[1] = simplify_gen_subreg (QImode, operands[1], mode, 0); } else if (mode == HImode) { operands[0] = simplify_gen_subreg (HImode, operands[0], mode, 0); operands[1] = simplify_gen_subreg (HImode, operands[1], mode, 0); } else if (mode == SImode) { operands[0] = simplify_gen_subreg (SImode, operands[0], mode, 0); operands[1] = simplify_gen_subreg (SImode, operands[1], mode, 0); }" GCC,arc,166,"static void workaround_arc_anomaly(void) { if (arc_hazard_p()) { 
emit_insn(gen_nop()); } } ``` This function checks if the `arc_hazard` is present, and if it is, it inserts a `NOP` instruction to ensure that the branch is not shortened. The `arc_hazard` is a target-specific value that indicates the presence of a hazard in the ARC architecture. You can call this function in the machine reorg phase of the GCC compiler by adding the following code to your machine reorg pass: ``` void machine_reorg(void) { workaround_arc_anomaly(); }" GCC,arc,167,int arc_address_cost(int ADDR) { int cost = 0; if (ADDR >= 0 && ADDR <= 15) { cost = arc_target_specific_value[ADDR]; } return cost; } GCC,arc,168,"arc_autovectorize_vector_sizes (void) { static const int arc_vector_sizes[] = { 8, 4, 0 }; targetm.vectorize.autovectorize_vector_sizes = arc_vector_sizes; }" GCC,arc,169,"arc_emit_call_tls_get_addr (rtx op, rtx addr) { rtx reg = gen_reg_rtx (Pmode); emit_insn (gen_rtx_SET (VOIDmode, reg, gen_rtx_MEM (Pmode, addr))); emit_insn (gen_rtx_SET (VOIDmode, op, gen_rtx_MEM (Pmode, reg))); }" GCC,arc,170,"arc_epilogue_uses (int regno) { if (regno == arc_return_address_regs[arc_compute_function_type (cfun)]) return true; if (regno == ARC_BLINK_REGNUM && arc_isr_clobber_blink (cfun)) return true; if (arc_is_interrupt_function (cfun) && arc_live_in_reg (regno, cfun)) return true; return false; }" GCC,arc,171,"arc_finalize_pic (void) { if (TREE_CODE (current_function_decl) == FUNCTION_DECL && DECL_GOTOFF_RELOCATIONS (current_function_decl)) { rtx_insn *insn = emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, GOTBASE_REGNUM), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, PC_REGNUM), gen_rtx_CONST (SImode, gen_rtx_UNSPEC (SImode, gen_rtx_SYMREF (SImode, ""_DYNAMIC""), 3)))); emit_insn_before (insn, get_insns ()); } }" GCC,arc,172,void *arc_get_tp(void) { #ifdef __arc__ return (void *)ARC_THREAD_POINTER_REG; #else return NULL; #endif } GCC,arc,173, GCC,arc,174,void arc_init_reg_tables(void) { uint32_t i; for (i = 0; i < 32; i++) { arc_mode_class[i] = (i == 0) ? 4 : (i == 1) ? 8 : (i == 2) ? 16 : (i == 3) ? 
32 : 0; } } GCC,arc,175,"arc_label_align (rtx_insn *label, int align) { if (INSN_ALIGN (label) >= align) return; INSN_ALIGN (label) = align; rtx_insn *insn; for (insn = NEXT_INSN (label); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { INSN_ALIGN (insn) = align; break; } } }" GCC,arc,176,arc_legitimate_constant_p (rtx x) { if (!CONSTANT_P (x)) return false; if (GET_CODE (x) == CONST_INT) { HOST_WIDE_INT val = INTVAL (x); if (val == 0 || val == 1 || val == 1 || val == 0 || val == 1) return true; } return false; } GCC,arc,177,"arc_legitimate_pc_offset_p (rtx addr) { if (GET_CODE (addr) == PLUS) { rtx base = XEXP (addr, 0); rtx offset = XEXP (addr, 1); if (GET_CODE (base) == REG && REGNO (base) == PC_REGNUM) { if (GET_CODE (offset) == CONST_INT) { HOST_WIDE_INT val = INTVAL (offset); if (val >= 0 && val < 32768) return true; } } } return false; }" GCC,arc,178,"arc_legitimate_pic_addr_p (rtx addr) { if (GET_CODE (addr) != CONST) return false; if (GET_CODE (XEXP (addr, 0)) != UNSPEC) return false; if (XINT (XEXP (addr, 0), 1) != ARC_UNSPEC_GOTOFF && XINT (XEXP (addr, 0), 1) != ARC_UNSPEC_GOT) return false; if (GET_CODE (XEXP (addr, 1)) != SYMBOL_REF && GET_CODE (XEXP (addr, 1)) != LABEL_REF) return false; return true; }" GCC,arc,179,"arc_legitimize_tls_address (rtx addr, enum tls_model model) { rtx base, offset; if (arc_legitimate_address_p (addr, 0, 0)) return addr; base = gen_rtx_REG (Pmode, 0); offset = gen_rtx_CONST_INT (Pmode, 0); if (GET_CODE (addr) == SYMBOL_REF) { rtx sym = gen_rtx_SYMBOL_REF (Pmode, XSTR (addr, 0)); rtx insn = emit_insn (gen_rtx_SET (base, sym)); RTX_FRAME_RELATED_P (insn) = 1; } else if (GET_CODE (addr) == CONST_INT) { offset = gen_rtx_PLUS (Pmode, offset, addr); } else gcc_unreachable (); return gen_rtx_PLUS (Pmode, base, offset); }" GCC,arc,180,"arc_needs_pcl_p (rtx op, machine_mode mode) { if (mode == VOIDmode) return false; if (GET_CODE (op) == REG && REGNO (op) == PC_REGNUM) return true; if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == REG && REGNO (XEXP (op, 0)) == PC_REGNUM) return true; return false; }" GCC,arc,181,"int arc_output_addsi (rtx *operands, int cond_p, int output_p) { int length = 0; if (output_p) { length += arc_emit_insn (operands, cond_p); } return length; }" GCC,arc,182,"int arc_output_commutative_cond_exec (rtx_insn *insn, rtx *operands, int output_p) { int length = 0; if (output_p) { switch (GET_CODE (insn)) { case BMSK: length = 2; break; case BMSKN: length = 2; break; case BCLR: length = 2; break; case BIC: length = 2; break; case BSET: length = 2; break; case BXOR: length = 2; break; default: gcc_unreachable (); } switch (GET_CODE (insn)) { case BMSK: output_asm_insn (""bmsk%? %0,%1,%Z2"", operands); break; case BMSKN: output_asm_insn (""bmskn%? %0,%1,%Z2"", operands); break; case BCLR: output_asm_insn (""bclr%? %0,%1,%M2"", operands); break; case BIC: output_asm_insn (""bic%? %0,%1,%n2-1"", operands); break; case BSET: output_asm_insn (""bset%? %0,%1,%z2"", operands); break; case BXOR: output_asm_insn (""bxor%? 
%0,%1,%z2"", operands); break; default: gcc_unreachable (); } }" GCC,arc,183,"arc_preferred_simd_mode (enum machine_mode mode, enum machine_mode *mode_out) { if (mode == VOIDmode || mode == BLKmode) return mode; if (mode == QImode || mode == HImode) return SImode; if (mode == SImode) return V4SImode; if (mode == DImode) return V2DImode; if (mode == TImode) return V2TImode; if (mode == OImode) return V4OImode; if (mode == CImode) return V4CImode; if (mode == XImode) return V4XImode; if (mode == PImode) return V4PImode; if (mode == HFmode) return V2HFmode; if (mode == SFmode) return V4SFmode; if (mode == DFmode) return V2DFmode; if (mode == XFmode) return V2XFmode; if (mode == TFmode) return V2TFmode; if (mode == SCmode) return V4SCmode; if (mode == DCmode) return V2DCmode; if (mode == XCmode) return V2XCmode; if (mode == TCmode) return V2TCmode; return mode; }" GCC,arc,184,"arc_print_operand_address (rtx x, int code, int mode) { if (code == 0) { fprintf (asm_out_file, ""@%s"", XSTR (x, 0)); } else { fprintf (asm_out_file, ""%d"", INTVAL (x)); } }" GCC,arc,185,"void arc_process_double_reg_moves(uint32_t *dest, uint32_t *src) { uint32_t temp; if (src[0] == 0 && src[1] == 0) { return; }" GCC,arc,186,"arc_register_move_cost (rtx op, enum machine_mode mode) { int cost = 0; switch (mode) { case QImode: cost = 1; break; case HImode: cost = 2; break; case SImode: cost = 3; break; case DImode: cost = 4; break; case TImode: cost = 5; break; default: gcc_unreachable (); } return cost; }" GCC,arc,187,"arc_rewrite_small_data_p (rtx x) { if (GET_CODE (x) != MEM) return false; rtx addr = XEXP (x, 0); if (GET_CODE (addr) != PLUS) return false; rtx base = XEXP (addr, 0); if (GET_CODE (base) != REG) return false; rtx index = XEXP (addr, 1); if (GET_CODE (index) != CONST_INT) return false; HOST_WIDE_INT offset = INTVAL (index); if (offset < 0 || offset >= 0x10000) return false; return true; }" GCC,arc,188,"bool arc_store_addr_hazard_p(rtx_insn *consumer, rtx_insn *producer) { if (consumer->type != RTX_INSN_TYPE_LOAD) return false; if (producer->type != RTX_INSN_TYPE_STORE) return false; if (consumer->addr_reg == producer->addr_reg) return true; return false; }" GCC,arc,189,arc_vector_mode_supported_p (machine_mode mode) { if (mode == VOIDmode) return false; switch (mode) { case QImode: case HImode: case SImode: case DImode: case TImode: case OImode: case XImode: return true; default: return false; } } GCC,arc,190,"emit_unlikely_jump (rtx_insn *insn) { rtx_insn *prev_insn = PREV_INSN (insn); if (prev_insn && GET_CODE (prev_insn) == JUMP_INSN) { rtx_insn *target_insn = JUMP_LABEL (prev_insn); if (target_insn && GET_CODE (target_insn) == CODE_LABEL) { rtx_insn *new_insn = emit_jump_insn_before (target_insn, prev_insn); new_insn->set_unlikely (); } } }" GCC,arc,191,"uint32_t force_offsettable(uint32_t addr, uint32_t size, bool reuse) { uint32_t offset = 0; uint32_t mask = ARC_TARGET_SPECIFIC_VALUE; if (reuse) { offset = addr & mask; addr = addr - offset; } asm volatile ( ""mov %0, %1\n"" ""and %2, %3\n"" ""add %0, %4\n"" : ""=r"" (addr) : ""r"" (addr), ""r"" (mask), ""r"" (size), ""r"" (offset) : ""memory"" ); return addr; }" GCC,arc,192,"prepare_move_operands (rtx op, enum machine_mode mode) { if (mode == MODE_INT) { if (GET_CODE (op) == CONST_INT) { op = gen_int_mode (INTVAL (op), mode); } else if (GET_CODE (op) == CONST_DOUBLE) { op = gen_int_mode (CONST_DOUBLE_LOW (op), mode); }" GCC,arc,193,"void prepare_pic_move(rtx *operands, int num_operands) { int i; for (i = 1; i < num_operands; i++) { rtx src = 
operands[i]; rtx dest = operands[0]; emit_move_insn(dest, src); } }" GCC,arc,194,"static void workaround_arc_anomaly(void) { if (arc_hazard_p()) { emit_insn(gen_nop()); } } ``` This function checks if the `arc_hazard` is present, and if it is, it inserts a `NOP` instruction to ensure that the branch is not shortened. The `arc_hazard` is a target-specific value that indicates the presence of a hazard in the ARC architecture. You can call this function in the machine reorg phase of the GCC compiler by adding the following code to your machine reorg pass: ``` void machine_reorg(void) { workaround_arc_anomaly(); }" GCC,arc,195,arc_autovectorize_vector_sizes (void) { if (arc_vector_size_supported_p (8)) { add_autovectorize_vector_size (8); } if (arc_vector_size_supported_p (4)) { add_autovectorize_vector_size (4); } } GCC,arc,196,"uintptr_t arc_builtin_setjmp_frame_value(void) { uintptr_t frame_ptr = __builtin_arc_get_frame_ptr(); return frame_ptr; } ``` This function uses the `__builtin_arc_get_frame_ptr` built-in function to get the current frame pointer value, and then returns it. The `uintptr_t` type is used to ensure that the returned value is large enough to hold a pointer value. You can use this function in your code to get the frame pointer value to be backed up in the setjmp buffer. For example: ``` #include int main(void) { jmp_buf env; uintptr_t frame_ptr = arc_builtin_setjmp_frame_value(); setjmp(env, frame_ptr); }" GCC,arc,197,"bool arc_can_eliminate(int from, int to) { if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) { return true; } if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) { return false; } return true; }" GCC,arc,198,int arc_compute_frame_size(int size) { int frame_size; frame_size = (size + 31) & ~31; return frame_size; } GCC,arc,199,"enum arc_function_type { ARC_FUNCTION_TYPE_NORMAL, ARC_FUNCTION_TYPE_NAKED, ARC_FUNCTION_TYPE_INTERRUPT, ARC_FUNCTION_TYPE_ILINK1, ARC_FUNCTION_TYPE_ILINK, ARC_FUNCTION_TYPE_ILINK2, ARC_FUNCTION_TYPE_FIRQ, }; static enum arc_function_type arc_compute_function_type (tree decl) { static enum arc_function_type cached_type = ARC_FUNCTION_TYPE_NORMAL; if (decl == NULL_TREE) { cached_type = ARC_FUNCTION_TYPE_NORMAL; return cached_type; } if (TREE_CODE (decl) == FUNCTION_DECL) { if (DECL_NAKED_P (decl)) return ARC_FUNCTION_TYPE_NAKED; else if (DECL_INTERRUPT_P (decl)) return ARC_FUNCTION_TYPE_INTERRUPT; else if (DECL_ILINK1_P (decl)) return ARC_FUNCTION_TYPE_ILINK1; else if (DECL_ILINK_P (decl)) return ARC_FUNCTION_TYPE_ILINK; else if (DECL_ILINK2_P (decl)) return ARC_FUNCTION_TYPE_ILINK2; else if (DECL_FIRQ_P (decl)) return ARC_FUNCTION_TYPE_FIRQ; else return ARC_FUNCTION_TYPE_NORMAL; } return cached_type; }" GCC,arc,200,"arc_eh_return_address_location (void) { return gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, SP_REGNUM), GEN_INT (4))); }" GCC,arc,201,"arc_epilogue_uses (int regno) { if (regno == arc_return_address_regs[arc_compute_function_type (cfun)]) return true; if (regno == ARC_BLINK_REGNUM && arc_isr_clobber_blink (cfun)) return true; if (arc_is_interrupt_function (cfun) && arc_live_in_reg (regno, cfun)) return true; return false; }" GCC,arc,202, GCC,arc,203,"void arc_expand_prologue(void) { __asm__ volatile ( ""mov r1, r15\n"" ""mov r2, r14\n"" ""mov r3, r13\n"" ""mov r4, r12\n"" ""mov r5, r11\n"" ""mov r6, r10\n"" ""mov r7, r9\n"" ""mov r8, r8\n"" ""mov r9, r7\n"" ""mov r10, r6\n"" ""mov r11, r5\n"" ""mov r12, r4\n"" ""mov r13, r3\n"" ""mov r14, r2\n"" ""mov r15, r1\n"" ::: ""memory"" ); }" 
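The frame-size snippet in record 198 above relies on the rounding idiom `(size + 31) & ~31`, which rounds a byte count up to the next multiple of 32: adding 31 pushes any non-multiple over the next boundary, and the inverted mask clears the low five bits. A minimal standalone sketch of that idiom follows, assuming only standard C; the helper name `round_up_to_32` is hypothetical and is not part of the GCC ARC backend sources: ``` #include <assert.h>
#include <stdio.h>

/* Round a byte count up to the next multiple of 32, the same idiom used
   in the arc_compute_frame_size snippet: add the alignment minus one,
   then clear the low bits with the inverted mask. */
static int round_up_to_32(int size)
{
    return (size + 31) & ~31;
}

int main(void)
{
    assert(round_up_to_32(0) == 0);
    assert(round_up_to_32(1) == 32);
    assert(round_up_to_32(32) == 32);
    assert(round_up_to_32(33) == 64);
    printf("frame size for 45 bytes of locals: %d\n", round_up_to_32(45));
    return 0;
}
```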
GCC,arc,204,"void arc_file_start (void) { fprintf(asm_out_file, ""\t.cpu %s\n"", ""arc""); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_PCS_config, %d\n"", 1); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_ABI_rf16, %d\n"", 0); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_ABI_pic, %d\n"", 2); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_ABI_tls, %d\n"", 1); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_ABI_sda, %d\n"", 0); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_ABI_exceptions, %d\n"", 1); fprintf(asm_out_file, ""\t.arc_attribute Tag_ARC_CPU_variation, %d\n"", 3); }" GCC,arc,205, GCC,arc,206," bool *no_add_attrs) { tree attr = lookup_attribute (""aux"", args); if (attr == NULL_TREE) return; tree reg_num = TREE_VALUE (TREE_VALUE (attr)); if (TREE_CODE (reg_num) != INTEGER_CST) { error (""invalid aux attribute argument""); return; } int reg = TREE_INT_CST_LOW (reg_num); if (reg < 0 || reg >= 16) { error (""invalid aux register number""); return; } tree type = TREE_TYPE (decl); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); if (TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != FLOAT_TYPE) { error (""invalid type for aux attribute""); return; } if (TREE_CODE (decl) == VAR_DECL) { tree init = DECL_INITIAL (decl); if (init != NULL_TREE && TREE_CODE (init) != CONSTRUCTOR) { error (""invalid initializer for aux attribute""); return; } if (TREE_CODE (init) == CONSTRUCTOR) { tree elt = CONSTRUCTOR_ELTS (init); if (TREE_CODE (elt) != INTEGER_CST) { error (""invalid initializer for aux attribute""); return; }" GCC,arc,207, GCC,arc,208,"arc_initial_elimination_offset (rtx_insn *insn, int regno) { int offset = insn->address - arc_backend_get_routine_start_address (insn); if (regno != arc_backend_get_replacement_register (insn)) offset += arc_backend_get_register_size (insn, regno) - arc_backend_get_register_size (insn, arc_backend_get_replacement_register (insn)); return offset; }" GCC,arc,209,arc_is_uncached_mem_p (rtx p) { if (GET_CODE (p) != MEM) return false; if (GET_MODE (p) == QImode) return true; if (GET_MODE (p) == HImode) return true; if (GET_MODE (p) == SImode) return true; if (GET_MODE (p) == DImode) return true; return false; } GCC,arc,210,"arc_label_align (rtx_insn *label, int align) { if (INSN_ALIGN (label) >= align) return; INSN_ALIGN (label) = align; rtx_insn *insn; for (insn = NEXT_INSN (label); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { INSN_ALIGN (insn) = align; break; } } }" GCC,arc,211,int arc_must_save_register(uint32_t regno) { switch (regno) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: case 28: case 29: case 30: case 31: return 1; default: return 0; } } GCC,arc,212,"void arc_output_mi_thunk (int delta, void *function) { asm volatile (""\tld\t%s, [%s]\n"" ""\tadd\t%s, %s, %s\n"" ""\tld\t%s, [%s]\n"" ""\tadd\t%s, %s, %s\n"" ""\tj\t[%s]\n"" : ""=r"" (delta), ""=r"" (function) : ""0"" (delta), ""1"" (function) : ""memory""); }" GCC,arc,213,"arc_register_move_cost (rtx_insn *insn, rtx_insn *next_insn) { int cost = 0; if (REG_P (insn) && REG_P (next_insn) && REGNO_REG_CLASS (REGNO (insn)) == REGNO_REG_CLASS (REGNO (next_insn))) { cost = 1; } else { cost = 10; } return cost; }" GCC,arc,214,int arc_return_address_register(TYPE type) { switch (type) { case ARC_RETURN_ADDRESS_REGISTER: return 0; case ARC_RETURN_ADDRESS_REGISTER_2: 
return 1; case ARC_RETURN_ADDRESS_REGISTER_3: return 2; case ARC_RETURN_ADDRESS_REGISTER_4: return 3; default: return -1; } } GCC,arc,215,"void arc_rewrite_small_data(uint32_t op, uint32_t *data) { uint32_t offset = op & 0xFFFF; uint32_t size = (op >> 16) & 0xFFFF; if (size == 0) { return; } uint32_t small_data_offset = (uint32_t)data - (uint32_t)arc_small_data_start; if (small_data_offset >= arc_small_data_size) { return; } uint32_t new_offset = small_data_offset + offset; if (new_offset >= arc_small_data_size) { return; } op = (op & 0xFFFF0000) | (new_offset & 0xFFFF); }" GCC,arc,216,void arc_rewrite_small_data_1(uint32_t op) { if (op & 0x80000000) { uint32_t small_data_offset = op & 0x7FFFFFFF; uint32_t small_data_address = arc_0 + small_data_offset; op = small_data_address; } } GCC,arc,217, GCC,arc,218,"int arc_verify_short(rtx insn, int unalign, int check_attr) { if (check_attr > 0) { if (GET_CODE(insn) == INSN && GET_MODE(insn) == QImode) { return 1; } } else if (unalign == 0) { if (GET_CODE(insn) == INSN && GET_MODE(insn) == QImode) { return 1; } } else { return 0; } }" GCC,arc,219, GCC,arc,220,"void irq_range(char *range, char *blink, char *lp_count) { char *p = range; char *q = blink; char *r = lp_count; while (*p != '\0') { if (*p == '-') { if (p[1] == 'R' && p[2] >= '0' && p[2] <= '9') { if (p[3] == '0') { if (p[4] != '0') { fprintf(stderr, ""value of -mirq-ctrl-saved must have form R0-REGx\n""); exit(1); } } else { if (p[3] != 'R' || p[4] < '0' || p[4] > '9') { fprintf(stderr, ""unknown register name: %s\n"", p + 3); exit(1); } } } else { fprintf(stderr, ""value of -mirq-ctrl-saved must have form R0-REGx\n""); exit(1); } } else { fprintf(stderr, ""value of -mirq-ctrl-saved must have form R0-REGx\n""); exit(1); }" GCC,arc,221,"legitimate_scaled_address_p (rtx op) { if (!MEM_P (op)) return false; rtx addr = XEXP (op, 0); if (GET_CODE (addr) != PLUS) return false; rtx base = XEXP (addr, 0); rtx index = XEXP (addr, 1); if (GET_CODE (base) != REG || GET_CODE (index) != REG) return false; if (REGNO (base) != 0 || REGNO (index) != 0) return false; if (GET_MODE (op) != SImode) return false; return true; }" GCC,arc,222,"parse_mrgf_banked_regs_option (const char *option) { int num; if (sscanf (option, ""-mrgf-banked-regs=%d"", &num) != 1) error (""invalid number in -mrgf-banked-regs=%s"", option); if (num != 4 && num != 8 && num != 16 && num != 32) error (""valid values are 0, 4, 8, 16, or 32""); target_specific_value = num; }" GCC,arc,223,"bool small_data_pattern(rtx op) { if (GET_CODE(op) == SYMBOL_REF) { const char *name = XSTR(op, 0); if (strcmp(name, ""arc"") == 0) { return true; } } return false; }" GCC,arc,224,"uintptr_t arc_builtin_setjmp_frame_value(void) { uintptr_t frame_ptr = __builtin_arc_get_frame_ptr(); return frame_ptr; } ``` This function uses the `__builtin_arc_get_frame_ptr` built-in function to get the current frame pointer value, and then returns it. The `uintptr_t` type is used to ensure that the returned value is large enough to hold a pointer value. You can use this function in your code to get the frame pointer value to be backed up in the setjmp buffer. 
For example: ``` #include int main(void) { jmp_buf env; uintptr_t frame_ptr = arc_builtin_setjmp_frame_value(); setjmp(env, frame_ptr); }" GCC,arc,225,"arc_compute_frame_size (int size) { int frame_size; frame_size = arc_target_specific_value (size, 0, 0, 0, 31, 1L, 0, 1L, 0, 4, 4); return frame_size; }" GCC,arc,226, GCC,arc,227,void arc_expand_epilogue(void) { arc_expand_epilogue_sp(); arc_expand_epilogue_fp(); arc_expand_epilogue_regs(); } void arc_expand_epilogue_sp(void) { arc_expand_epilogue_sp_reg(); } void arc_expand_epilogue_fp(void) { arc_expand_epilogue_fp_reg(); } void arc_expand_epilogue_regs(void) { arc_expand_epilogue_regs_reg(); } void arc_expand_epilogue_sp_reg(void) { arc_expand_epilogue_sp_reg_reg(); } void arc_expand_epilogue_fp_reg(void) { arc_expand_epilogue_fp_reg_reg(); } void arc_expand_epilogue_regs_reg(void) { arc_expand_epilogue_regs_reg_reg(); } void arc_expand_epilogue_sp_reg_reg(void) { arc_expand_epilogue_sp_reg_reg_reg(); } void arc_expand_epilogue_fp_reg_reg(void) { arc_expand_epilogue_fp_reg_reg_reg(); } GCC,arc,228,"void arc_expand_prologue(void) { __asm__(""arc,0,0,0,0,0,14,0,0""); }" GCC,arc,229, GCC,arc,230,bool arc_is_uncached_mem_p (void *addr) { if (addr == NULL) return FALSE; if (!arc_is_mem_p(addr)) return FALSE; if (arc_is_uncached_p(addr)) return TRUE; return FALSE; } GCC,arc,231,int arc_must_save_register(uint32_t regno) { switch (regno) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: case 28: case 29: case 30: case 31: return 1; default: return 0; } } GCC,arc,232,"void arc_restore_callee_saves(void) { __asm__ __volatile__ ( ""ld r0, [sp, #0]\n"" ""ldd r1, [sp, #4]\n"" ""ld r2, [sp, #8]\n"" ""ldd r3, [sp, #12]\n"" ""ld r4, [sp, #16]\n"" ""ldd r5, [sp, #20]\n"" ""ld r6, [sp, #24]\n"" ""ldd r7, [sp, #28]\n"" ""ld r8, [sp, #32]\n"" ""ldd r9, [sp, #36]\n"" ""ld r10, [sp, #40]\n"" ""ldd r11, [sp, #44]\n"" ""ld r12, [sp, #48]\n"" ""ldd r13, [sp, #52]\n"" ""ld r14, [sp, #56]\n"" ""ldd r15, [sp, #60]\n"" ::: ""memory"" ); }" GCC,arc,233,int arc_return_address_register(TYPE type) { switch (type) { case ARC_RETURN_ADDRESS_REGISTER: return 0; case ARC_RETURN_ADDRESS_REGISTER_2: return 1; case ARC_RETURN_ADDRESS_REGISTER_3: return 2; case ARC_RETURN_ADDRESS_REGISTER_4: return 3; default: return -1; } } GCC,arc,234,"void arc_save_callee_enter(void) { __asm__ __volatile__ ( ""enter %0, %1\n"" : ""=r"" (__arc_save_callee_enter_regs) : ""r"" (__arc_save_callee_enter_regs) ); __asm__ __volatile__ ( ""enter %0, %1\n"" : ""=r"" (__arc_save_callee_enter_regs) : ""r"" (__arc_save_callee_enter_regs) ); __asm__ __volatile__ ( ""enter %0, %1\n"" : ""=r"" (__arc_save_callee_enter_regs) : ""r"" (__arc_save_callee_enter_regs) ); }" GCC,arc,235,"void arc_save_callee_milli(uint32_t gmask, uint32_t *regs) { uint32_t i; uint32_t reg_range_start = ARC_REG_R13; uint32_t reg_range_end = ARC_REG_R25; uint32_t reg_range_size = reg_range_end - reg_range_start + 1; for (i = reg_range_start; i <= reg_range_end; i++) { if (gmask & (1 << i)) { arc_millicode_call(i, regs[i]); } } } void arc_millicode_call(uint32_t reg, uint32_t value) { }" GCC,arc,236, GCC,arc,237," int unsignedp, int volatilep) { if (GET_CODE (op) == MEM) { rtx addr = XEXP (op, 0); if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && REGNO (XEXP (addr, 0)) == SDA_BASE_REGNUM && GET_CODE (XEXP (addr, 1)) == 
CONST_INT) { HOST_WIDE_INT offset = INTVAL (XEXP (addr, 1)); if (offset >= 0 && offset < 4096) { fputs (""["", asm_out_file); output_addr_const (asm_out_file, addr); fputs (""]"", asm_out_file); return; } } } output_operand_lossage (""invalid memory operand for `compact_sda_memory_operand'""); }" GCC,arc,238,"legitimate_scaled_address_p (rtx op) { if (GET_CODE (op) != MEM) return false; rtx addr = XEXP (op, 0); if (GET_CODE (addr) != PLUS) return false; rtx base = XEXP (addr, 0); if (GET_CODE (base) != REG) return false; rtx index = XEXP (addr, 1); if (GET_CODE (index) != REG) return false; if (GET_MODE (base) != GET_MODE (index)) return false; if (GET_MODE_SIZE (GET_MODE (base)) != 4) return false; if (GET_MODE_SIZE (GET_MODE (index)) != 4) return false; if (GET_CODE (XEXP (index, 0)) != CONST_INT) return false; HOST_WIDE_INT scale = INTVAL (XEXP (index, 0)); if (scale != 1 && scale != 2 && scale != 4 && scale != 8) return false; return true; }" GCC,arc,239,"legitimate_small_data_address_p (rtx x) { if (GET_CODE (x) == REG) return true; if (GET_CODE (x) == MEM) { rtx addr = XEXP (x, 0); if (GET_CODE (addr) == REG) return true; if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && GET_CODE (XEXP (addr, 1)) == CONST_INT && INTVAL (XEXP (addr, 1)) < 4096) return true; } return false; } ``` This function checks if `x` is a register or a memory location, and if it is a memory location, it checks if the address is a register or a sum of a register and a small constant. If any of these conditions are true, the function returns true, indicating that `x` is a valid small data address. You can use this function in your GCC backend to determine if a given address is a valid small data address. For example, you can use it in the `legitimate_address_p` function to check if an address is a valid small data address before attempting to load or store data to it. ``` static inline bool legitimate_address_p (rtx x, bool strict) { if (strict) return legitimate_strict_address_p (x); else return legitimate_small_data_address_p (x); }" GCC,arc,240," struct arc_expand_builtin_args *args) { } ``` This function takes two arguments: * `builtin`: A pointer to a `struct arc_expand_builtin` structure that contains information about the built-in function being expanded. * `args`: A pointer to a `struct arc_expand_builtin_args` structure that contains the arguments to the built-in function. The function generates the pattern for the given icode and arguments, and stores the result in the `builtin` structure. 
Here is an example of how this function could be used in the `arc_expand_builtin` function: ``` static void arc_expand_builtin(struct arc_expand_builtin *builtin, struct arc_expand_builtin_args *args) { apply_GEN_FCN(builtin, args); }" GCC,arc,241,int arc600_corereg_hazard(int insn) { int length = 0; if (insn >= 32 && insn <= 61) { length = 4; } else if (insn >= 3 && insn <= 4) { length = 0; } else { length = 1; } return length; } GCC,arc,242,arc_active_insn (rtx insn) { while (insn && !INSN_DELETED_P (insn) && !INSN_ACTIVE_P (insn)) insn = NEXT_INSN (insn); if (insn && INSN_DELETED_P (insn)) return NULL_RTX; return insn; } GCC,arc,243,int arc_address_cost(int ADDR) { int cost = 0; if (ADDR >= 0 && ADDR <= 15) { cost = arc_target_specific_value[ADDR]; } return cost; } GCC,arc,244,arc_add_jli_section (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL) { if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE && TYPE_ARRAY_ELEMENT (TREE_TYPE (decl)) == INTEGER_TYPE && TYPE_ARRAY_ELEMENT (TREE_TYPE (decl)) == ARC_JLI_SECTION) { TREE_CODE (decl) = JLI_SECTION; TREE_TYPE (decl) = TREE_TYPE (TREE_TYPE (decl)); TREE_VALUE (decl) = TREE_VALUE (TREE_TYPE (decl)); TREE_CHAIN (decl) = TREE_CHAIN (TREE_TYPE (decl)); TREE_CODE (TREE_TYPE (decl)) = ARRAY_TYPE; TREE_TYPE (TREE_TYPE (decl)) = TREE_TYPE (TREE_TYPE (decl)); TREE_VALUE (TREE_TYPE (decl)) = TREE_VALUE (TREE_TYPE (decl)); TREE_CHAIN (TREE_TYPE (decl)) = TREE_CHAIN (TREE_TYPE (decl)); } } } GCC,arc,245,int arc_adjust_insn_length (rtx_insn *insn) { if (GET_CODE (insn) == SET && (GET_CODE (SET_SRC (insn)) == MEM || GET_CODE (SET_DEST (insn)) == MEM)) { if (GET_CODE (SET_SRC (insn)) == MEM) { if (GET_MODE (SET_SRC (insn)) == QImode) { return 4; } else if (GET_MODE (SET_SRC (insn)) == HImode) { return 8; } else { return 16; } } else { if (GET_MODE (SET_DEST (insn)) == QImode) { return 4; } else if (GET_MODE (SET_DEST (insn)) == HImode) { return 8; } else { return 16; } } } else { return 0; } } GCC,arc,246,arc_adjust_reg_alloc_order (void) { if (flag_size_optimization) { arc_reg_alloc_order = ARC_REG_ALLOC_ORDER_SIZE; } else { arc_reg_alloc_order = ARC_REG_ALLOC_ORDER_DEFAULT; } } GCC,arc,247,"arc_allocate_stack_slots_for_args (void) { for (int i = 0; i < crtl->args.size (); i++) { tree arg = crtl->args[i]; if (arg->type->mode == BLKmode) { tree slot = build_decl (UNKNOWN_LOCATION, VAR_DECL, NULL_TREE, arg->type); DECL_EXTERNAL (slot) = 1; DECL_ARTIFICIAL (slot) = 1; DECL_IGNORED_P (slot) = 1; TREE_ADDRESSABLE (slot) = 1; DECL_CONTEXT (slot) = current_function_decl; DECL_SOURCE_LOCATION (slot) = UNKNOWN_LOCATION; DECL_INITIAL (slot) = error_mark_node; crtl->args.safe_push (slot); } } }" GCC,arc,248," int byte_align, int byte_reverse) { if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; } if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; } if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; } if (byte_offset != 0 || byte_size != 4 || byte_align != 4 || byte_reverse != 0) { error (""ARC does not support partial byte loads or stores""); return; }" GCC,arc,249,arc_asm_insn_p (rtx insn) { if (GET_CODE (insn) == ASM_INPUT || GET_CODE (insn) == ASM_OPERANDS) return true; if (GET_CODE (insn) == CONST_INT && INTVAL (insn) == ARC_TARGET_SPECIFIC_VALUE) return true; 
return false; } GCC,arc,250,"void arc_asm_output_aligned_decl_local (FILE *stream, tree decl, int align) { if (align > 0) { fprintf (stream, ""\t.sbss\n""); fprintf (stream, ""\t.align %d\n"", align); } else { fprintf (stream, ""\t.sbss\n""); } fprintf (stream, ""\t.local %s\n"", IDENTIFIER_POINTER (DECL_NAME (decl))); fprintf (stream, ""\t.type %s, @object\n"", IDENTIFIER_POINTER (DECL_NAME (decl))); fprintf (stream, ""\t.size %s, 0\n"", IDENTIFIER_POINTER (DECL_NAME (decl))); }" GCC,arc,251,"arc_asm_trampoline_template (FILE *stream, rtx insn) { rtx operands[3]; int i; for (i = 0; i < 3; i++) operands[i] = XEXP (insn, i); fprintf (stream, ""\tld_s\t%s,[pcl,8]\n"", operands[0]); fprintf (stream, ""\tld\t%s,[pcl,12]\n"", operands[1]); fprintf (stream, ""\tj_s\t[%s]\n"", operands[2]); }" GCC,arc,252,"int arc_attr_type(rtx insn) { if (GET_CODE(insn) == INSN) { if (GET_ATTR(insn, ""type"") == NULL) return -1; else return GET_ATTR(insn, ""type"")->value; } else return -1; }" GCC,arc,253,"arc_autovectorize_vector_modes (tree type, int *nunits) { switch (TREE_CODE (type)) { case INTEGER_TYPE: *nunits = 4; break; case REAL_TYPE: *nunits = 4; break; case COMPLEX_TYPE: *nunits = 4; break; default: *nunits = 0; break; } return true; }" GCC,arc,254,"arc_branch_size_unknown_p (void) { if (TARGET_ARC) { if (optimize_size) { return false; } else { return true; } } else { return false; } } ``` This function returns `true` if the target architecture is ARC and the optimization level is greater than 0 (i.e., we're optimizing for speed), and `false` otherwise. You can then use this function in the `arc_reorg` function to determine whether or not to consider branches to be length 6 when estimating sizes. For example: ```c if (arc_branch_size_unknown_p ()) { /* Consider branches to be length 6. */ ... } else { /* Don't consider branches to be length 6. */ ... }" GCC,arc,255,"arc_builtin_decl (tree decl, tree name, tree type, tree value) { if (!TARGET_ARC) return; if (name == ""__builtin_arc_sync_lock"") { ... } else if (name == ""__builtin_arc_sync_unlock"") { ... } else { ... 
} }" GCC,arc,256,"arc_call_tls_get_addr (rtx ti, rtx ret) { rtx insn; insn = gen_call_insn (gen_rtx_SYMBOL_REF (Pmode, ""__tls_get_addr""), ti, ret); insn->target_specific = 1; return insn; }" GCC,arc,257,"arc_cannot_force_const_mem (rtx x) { if (CONSTANT_P (x)) return false; if (REG_P (x)) return true; if (MEM_P (x)) { rtx addr = XEXP (x, 0); if (CONSTANT_P (addr) || REG_P (addr)) return false; } return true; }" GCC,arc,258,"arc_cannot_substitute_mem_equiv_p (rtx insn) { if (MEM_P (insn)) { if (GET_CODE (insn) == SET) { if (GET_CODE (SET_SRC (insn)) == MEM && GET_CODE (SET_DEST (insn)) == REG) { if (GET_MODE_SIZE (GET_MODE (SET_SRC (insn))) == 4) { if (GET_CODE (SET_SRC (insn)) == MEM && GET_CODE (XEXP (SET_SRC (insn), 0)) == REG && GET_CODE (XEXP (XEXP (SET_SRC (insn), 0), 0)) == PLUS && GET_CODE (XEXP (XEXP (XEXP (SET_SRC (insn), 0), 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (XEXP (SET_SRC (insn), 0), 0), 1)) % 4 != 0) { return true; } } } } } return false; }" GCC,arc,259,"bool arc_can_eliminate(int from, int to) { if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) { return true; } if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) { return false; } return true; }" GCC,arc,260,arc_can_follow_jump (rtx_insn *insn) { if (GET_CODE (insn) == JUMP_INSN) { rtx_insn *next_insn = NEXT_INSN (insn); if (next_insn && GET_CODE (next_insn) == JUMP_INSN) { return true; } } return false; } GCC,arc,261,"arc_can_use_doloop_p (void) { return (targetm.arc_target_specific_value[ARC_TARGET_SPECIFIC_VALUE_CAN_USE_DOLOOP] == 1); } ``` This function returns a boolean value indicating whether the ARC backend of the GCC compiler can use the `doloop` instruction. The `targetm.arc_target_specific_value` array is used to store target-specific values, and the `ARC_TARGET_SPECIFIC_VALUE_CAN_USE_DOLOOP` constant is used to access the value for the `can_use_doloop` property. You can then use this function in your code to determine whether the `doloop` instruction can be used for a particular target. For example: ``` if (arc_can_use_doloop_p()) { } else { }" GCC,arc,262,int arc_can_use_return_insn(void) { if (current_function_decl->epilogue == NULL) return 1; if (current_function_decl->frame_pointer != NULL) return 0; if (current_function_decl->local_variables != NULL) return 0; if (current_function_decl->outgoing_edges != NULL) return 0; if (current_function_decl->incoming_edges != NULL) return 0; return 1; } GCC,arc,263,"void arc_ccfsm_at_label(int num, const char *prefix) { arc_ccfsm_label_num = num; arc_ccfsm_label_prefix = prefix; if (num == arc_ccfsm_label_num && strcmp(prefix, arc_ccfsm_label_prefix) == 0) { arc_ccfsm_reset(); } } ``` This function takes two arguments: `num`, which is the label number, and `prefix`, which is the label prefix. It records the current label number and prefix, and if the label number and prefix match the target specific value `arc,3,""L"",0`, it resets the ccfsm machinery. You can call this function from the `targetm.asm_out.internal_label` function, which is the target-specific function for outputting internal labels. 
Here's an example of how you can modify the `targetm.asm_out.internal_label` function to call `arc_ccfsm_at_label`: ``` void targetm_asm_out_internal_label(int num, const char *prefix) { fprintf(asm_out_file, ""%s%d"", prefix, num); arc_ccfsm_at_label(num, prefix); }" GCC,arc,264, GCC,arc,265,"arc_ccfsm_cond_exec_p (rtx_insn *insn) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE) return true; return false; } ``` This function takes an `rtx_insn *` argument, which represents the current instruction being output. It checks if the instruction is a predicated instruction by checking if it has an `IF_THEN_ELSE` pattern. If it does, it returns `true`, otherwise it returns `false`. You can then use this function in the ARC backend of GCC to determine if an instruction is predicated or not. For example, you could use it in the `arc_output_insn` function to determine if an instruction should be output with a predicated condition code: ``` void arc_output_insn (rtx_insn *insn) { if (arc_ccfsm_cond_exec_p (insn)) { ... } else { ... } }" GCC,arc,266,"void arc_ccfsm_post_advance (rtx insn, rtx state) { if (GET_CODE (insn) == INSN && GET_CODE (state) == REG) { int arc_target_specific_value = 0; if (GET_MODE (state) == QImode) arc_target_specific_value = 5; else if (GET_MODE (state) == HImode) arc_target_specific_value = 1; else if (GET_MODE (state) == SImode) arc_target_specific_value = 0; else if (GET_MODE (state) == DImode) arc_target_specific_value = 1; if (arc_target_specific_value != 0) { rtx new_state = gen_rtx_PLUS (GET_MODE (state), state, GEN_INT (arc_target_specific_value)); emit_insn (new_state); } } }" GCC,arc,267,typedef struct { uint32_t insn; uint32_t branch_target; uint32_t branch_type; uint32_t branch_condition; } arc_ccfsm_record_branch_deleted_t; void arc_ccfsm_record_branch_deleted(arc_ccfsm_record_branch_deleted_t *record) { record->branch_type = ARC_CCFSM_BRANCH_TYPE_NOT_OUTPUT; record->branch_condition = ARC_CCFSM_BRANCH_CONDITION_NONE; } GCC,arc,268,"void arc_ccfsm_record_condition(int cond, int reverse) { if (reverse) { if (cond == 0) { ccfsm_state = ARC_CCFSM_CONDITIONAL; } else { ccfsm_state = ARC_CCFSM_UNCONDITIONAL; } } else { ccfsm_state = ARC_CCFSM_UNCONDITIONAL; } }" GCC,arc,269,"arc_check_ior_const (rtx op, machine_mode mode, int *val) { if (mode != QImode && mode != HImode && mode != SImode && mode != DImode) return false; if (GET_CODE (op) == CONST_INT) { if (INTVAL (op) == 0xffffffff) { *val = 0x3f; return true; } else if (INTVAL (op) == 0x3f) { *val = 0xffffffff; return true; } } return false; }" GCC,arc,270, GCC,arc,271,static inline bool arc_check_mov_const(uint32_t value) { return (value & 0xffffffff) == 0x8000001f && (value & 0x80000000) == 0 && (value & 0x7f000000) == 0 && (value & 0x3f000000) == 0 && (value & 0x3f0000) == 0 && (value & 0x3f00) == 0 && (value & 0x3f) == 0; } GCC,arc,272,"int arc_check_multi(rtx op, int push_p) { if (push_p) { if (op->code == ARC_OP_ENTER || op->code == ARC_OP_LEAVE) { return 1; } } else { if (op->code == ARC_OP_ENTER || op->code == ARC_OP_LEAVE) { return 1; } } return 0; }" GCC,arc,273,arc_clear_unalign (struct cfun *cfun) { cfun->machine->unalign = 0; } GCC,arc,274,uint64_t arc_compute_frame_size(uint64_t size) { uint64_t frame_size = 0; if (size == 0) { frame_size = 0; } else if (size == 1) { frame_size = 4; } else if (size == 2) { frame_size = 8; } else if (size == 3) { frame_size = 12; } else if (size == 4) { frame_size = 16; } else { frame_size = 20; } 
return frame_size; } GCC,arc,275,"enum arc_function_type { ARC_FUNCTION_TYPE_NORMAL, ARC_FUNCTION_TYPE_NAKED, ARC_FUNCTION_TYPE_INTERRUPT, ARC_FUNCTION_TYPE_ILINK1, ARC_FUNCTION_TYPE_ILINK, ARC_FUNCTION_TYPE_ILINK2, ARC_FUNCTION_TYPE_FIRQ, }; static enum arc_function_type arc_compute_function_type (tree decl) { static enum arc_function_type cached_type = ARC_FUNCTION_TYPE_NORMAL; if (decl == NULL_TREE) { cached_type = ARC_FUNCTION_TYPE_NORMAL; return cached_type; } if (TREE_CODE (decl) == FUNCTION_DECL) { if (DECL_NAKED_P (decl)) return ARC_FUNCTION_TYPE_NAKED; else if (DECL_INTERRUPT_P (decl)) return ARC_FUNCTION_TYPE_INTERRUPT; else if (DECL_ILINK1_P (decl)) return ARC_FUNCTION_TYPE_ILINK1; else if (DECL_ILINK_P (decl)) return ARC_FUNCTION_TYPE_ILINK; else if (DECL_ILINK2_P (decl)) return ARC_FUNCTION_TYPE_ILINK2; else if (DECL_FIRQ_P (decl)) return ARC_FUNCTION_TYPE_FIRQ; else return ARC_FUNCTION_TYPE_NORMAL; } return cached_type; }" GCC,arc,276,int arc_compute_millicode_save_restore_regs(struct arc_frame_info *frame) { unsigned int gmask = frame->gmask; int start_reg = 13; int end_reg = 25; if (gmask & (1ULL << 1)) { start_reg = 1; end_reg = 3; } frame->millicod_start_reg = start_reg; frame->millicod_end_reg = end_reg; return 1; } GCC,arc,277,"int arc_comp_type_attributes(tree type1, tree type2) { if (type1 == type2) return 0; if (type1 == long_call && type2 == long_call) return 1; if (type1 == medium_call && type2 == medium_call) return 1; if (type1 == short_call && type2 == short_call) return 1; if (type1 == long_call && type2 == medium_call) return 2; if (type1 == long_call && type2 == short_call) return 2; if (type1 == medium_call && type2 == short_call) return 2; return 0; }" GCC,arc,278,int arc_double_limm_p (const_double value) { if (value.is_zero()) return 1; if (value.is_negative()) return 0; if (value.is_positive()) return 1; return 0; } GCC,arc,279,"arc_dwarf_emit_irq_save_regs (rtx_insn *insn, rtx_insn *next_insn) { if (TARGET_ARC && TARGET_DWARF2 && TARGET_DWARF2_IRQ_CTRL) { const char *target_specific_value = ""arc,1,1,0,1,1,1,0,60,1,0,31,1,0,0,1,0,1""; dwarf2_emit_irq_save_regs (insn, next_insn, target_specific_value); } }" GCC,arc,280,"arc_dwarf_register_span (rtx reg) { if (GET_CODE (reg) == PARALLEL) { int num_regs = XVECLEN (reg, 0); rtx *reg_pieces = XNEWVEC (rtx, num_regs); for (int i = 0; i < num_regs; i++) { rtx reg_piece = XEXP (XVECEXP (reg, 0, i), 0); reg_pieces[i] = reg_piece; } return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (num_regs, reg_pieces)); } else { return NULL_RTX; } }" GCC,arc,281,"arc_eh_return_address_location (void) { rtx sp = gen_rtx_REG (Pmode, SP_REGNUM); rtx ra = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, sp, GEN_INT (-4))); ra = gen_rtx_TRUNCATE (Pmode, ra); rtx lr = gen_rtx_REG (Pmode, LR_REGNUM); emit_move_insn (lr, ra); return gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, sp, GEN_INT (-4))); }" GCC,arc,282, GCC,arc,283,"arc_encode_section_info (tree decl, rtx x, int first) { if (TREE_CODE (decl) == FUNCTION_DECL) { if (first) { const char *section_name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl)); if (strcmp (section_name, "".cmem"") == 0) { x = gen_rtx_CONST (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, section_name)); } else if (strcmp (section_name, "".cmem_shared"") == 0) { x = gen_rtx_CONST (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, section_name)); } else if (strcmp (section_name, "".cmem_private"") == 0) { x = gen_rtx_CONST (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, section_name)); } else { x = gen_rtx_CONST (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, 
section_name)); } }" GCC,arc,284,arc_enter_leave_p (rtx_insn *insn) { if (GET_CODE (insn) != INSN) return false; rtx pat = PATTERN (insn); if (GET_CODE (pat) != SET) return false; rtx src = SET_SRC (pat); if (GET_CODE (src) != REG) return false; int regno = REGNO (src); if (regno < 13 || regno > 26) return false; rtx dest = SET_DEST (pat); if (GET_CODE (dest) != REG) return false; if (regno != REGNO (dest)) return false; return true; } GCC,arc,285,"arc_epilogue_uses (int regno) { if (regno == arc_return_address_regs[arc_compute_function_type (cfun)]) return true; if (regno == ARC_BLINK_REGNUM && arc_isr_clobber_blink (cfun)) return true; if (arc_is_interrupt_function (cfun) && arc_live_in_reg (regno, cfun)) return true; return false; }" GCC,arc,286," rtx model_rtx) { rtx dest = gen_reg_rtx (GET_MODE (mem)); rtx src = gen_reg_rtx (GET_MODE (mem)); rtx temp = gen_reg_rtx (GET_MODE (mem)); emit_move_insn (src, mem); emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_fmt_ee (code, GET_MODE (mem), src, val))); emit_move_insn (mem, dest); if (before != NULL_RTX) emit_move_insn (before, src); if (after != NULL_RTX) emit_move_insn (after, dest); return dest; }" GCC,arc,287,"void arc_expand_builtin_aligned(void *val, int alignval) { uintptr_t ptr = (uintptr_t)val; uintptr_t aligned_ptr = (ptr + alignval - 1) & ~(alignval - 1); return aligned_ptr; } ``` This function takes two arguments: `val`, which is the pointer to be aligned, and `alignval`, which is the alignment value. It first casts the `val` pointer to a `uintptr_t` to ensure that it is properly aligned. It then calculates the aligned pointer by adding the alignment value to the original pointer, masking off the lower bits to ensure that the result is properly aligned, and returning the resulting pointer. Here's an example of how this function could be used: ```c int main() { int *ptr = (int *)malloc(sizeof(int)); int *aligned_ptr = arc_expand_builtin_aligned(ptr, 16); printf(""Aligned pointer: %p\n"", aligned_ptr); free(ptr); return 0; }" GCC,arc,288,"void arc_expand_compare_and_swap(uint32_t *ptr, uint32_t oldval, uint32_t newval) { uint32_t tmp; tmp = *ptr; if (tmp == oldval) { *ptr = newval; } return tmp; }" GCC,arc,289,"void arc_file_start (void) { const char *cpu_type = ""arc""; printf("".cpu %s\n"", cpu_type); printf("".arc_attribute Tag_ARC_PCS_config, %d\n"", 1); printf("".arc_attribute Tag_ARC_ABI_rf16, %d\n"", 0); printf("".arc_attribute Tag_ARC_ABI_pic, %d\n"", 2); printf("".arc_attribute Tag_ARC_ABI_tls, %d\n"", 1); printf("".arc_attribute Tag_ARC_ABI_sda, %d\n"", 0); printf("".arc_attribute Tag_ARC_ABI_exceptions, %d\n"", 1); printf("".arc_attribute Tag_ARC_CPU_variation, %d\n"", 2); printf("".arc_attribute Tag_ARC_ABI_variation, %d\n"", 3); printf("".arc_attribute Tag_ARC_ABI_variation, %d\n"", 4); } ``` This function uses the `printf` function to print out the CPU type and other fancy things at the top of the file. The `printf` function takes a format string and a list of arguments, and it prints out the formatted string to the console. In this case, the format string is a string literal that contains the CPU type and other fancy things, and the arguments are the values of the CPU type and other fancy things. You can call this function at the beginning of your program to set the CPU type and print out other fancy things at the top of the file. 
For example: ``` int main (void) { arc_file_start(); return 0; }" GCC,arc,290, GCC,arc,291,"void arc_function_arg(uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4) { }" GCC,arc,292,"void arc_function_arg_advance(uint32_t *cum, uint32_t last_arg_reg) { uint32_t reg_number = *cum; while (reg_number <= last_arg_reg) { reg_number++; } *cum = reg_number; }" GCC,arc,293,"bool arc_function_ok_for_sibcall(tree decl) { if (TREE_CODE(decl) == TARGET_EXPR) { tree target = TREE_OPERAND(decl, 0); if (TREE_CODE(target) == IDENTIFIER_NODE) { const char *name = IDENTIFIER_POINTER(target); if (strcmp(name, ""arc"") == 0 || strcmp(name, ""jli_always"") == 0 || strcmp(name, ""jli_fixed"") == 0 || strcmp(name, ""secure_call"") == 0) { return true; } } } if (TREE_CODE(decl) == FUNCTION_DECL && DECL_BUILT_IN(decl)) { return true; } if (TREE_CODE(decl) == FUNCTION_DECL && !DECL_BUILT_IN(decl)) { return true; } return false; }" GCC,arc,294,"arc_function_value (tree type, tree fn_decl_or_type) { if (TREE_CODE (fn_decl_or_type) == FUNCTION_DECL) { if (DECL_RESULT (fn_decl_or_type)) return DECL_RESULT (fn_decl_or_type); type = TREE_TYPE (fn_decl_or_type); } if (POINTER_TYPE_P (type) || REFERENCE_TYPE_P (type)) return build_fold_addr_expr (build_decl (VAR_DECL, NULL_TREE, type)); return build_decl (VAR_DECL, NULL_TREE, type); }" GCC,arc,295,uint32_t arc_get_ccfsm_cond(const struct arc_state *statep) { uint32_t cond = statep->cc; if (statep->cc & ARC_CC_INVERTED) { cond = ~cond; } return cond; } GCC,arc,296,"arc_handle_aux_attribute (tree decl, tree attr) { tree type = TREE_TYPE (decl); tree aux_reg = TREE_VALUE (attr); int reg_num = TREE_INT_CST_LOW (aux_reg); if (!arc_target_aux_reg_p (reg_num)) { error (""invalid aux register number %d"", reg_num); return; } if (!DECL_REGISTER (decl)) { error (""aux attribute only applies to register variables""); return; } if (!SCALAR_INT_MODE_P (TYPE_MODE (type))) { error (""aux attribute only applies to scalar integer variables""); return; } if (TREE_CODE (decl) == CONST_DECL) { error (""aux attribute cannot be applied to constant variables""); return; } if (TREE_CODE (decl) == PARM_DECL) { error (""aux attribute cannot be applied to parameter variables""); return; } if (TREE_CODE (decl) == FUNCTION_DECL) { error (""aux attribute cannot be applied to function variables""); return; } if (TREE_CODE (decl) == LABEL_DECL) { error (""aux attribute cannot be applied to label variables""); return; }" GCC,arc,297," int flags, bool *no_add_attrs) { tree arg; const char *target_specific_value; if (TREE_CODE (node) != FUNCTION_DECL) return; arg = TREE_VALUE (args); if (TREE_CODE (arg) != STRING_CST) { error (""argument of %qE attribute is not a string constant"", name); *no_add_attrs = true; return; } target_specific_value = TREE_STRING_POINTER (arg); if (strcmp (target_specific_value, ""ilink1"") == 0) { } else if (strcmp (target_specific_value, ""ilink2"") == 0) { } else if (strcmp (target_specific_value, ""ilink"") == 0) { } else if (strcmp (target_specific_value, ""firq"") == 0) { } else { error (""argument of %qE attribute is not \""ilink1\"", \""ilink2\"", \""ilink\"", or \""firq\"""", name); *no_add_attrs = true; return; } DECL_ATTRIBUTES (node) = tree_cons (name, args, DECL_ATTRIBUTES (node)); }" GCC,arc,298,"arc_handle_uncached_attribute (tree type, tree name, tree value) { if (TREE_CODE (type) == POINTER_TYPE || TREE_CODE (type) == REFERENCE_TYPE) { if (lookup_attribute (""uncached"", TYPE_ATTRIBUTES (type))) { TREE_THIS_VOLATILE (type) = 1; TREE_READONLY (type) = 1; } 
} if (TREE_CODE (type) == FUNCTION_TYPE) { TREE_THIS_VOLATILE (type) = 1; TREE_READONLY (type) = 1; } }" GCC,arc,299,"arc_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode) { if (regno >= FIRST_PSEUDO_REGISTER) return false; if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG) return false; if (mode == QImode || mode == HImode || mode == SImode || mode == DImode || mode == TImode) return true; return false; }" GCC,arc,300, GCC,arc,301,"void arc_hazard(uint32_t reg, uint32_t val) { if (reg >= 32) { uint32_t nop = 0x00000000; __asm__ volatile(""nop"" : : ""r""(nop)); } __asm__ volatile(""mov %0, %1"" : ""=r""(reg) : ""r""(val)); }" GCC,arc,302, GCC,arc,303,"typedef struct { uint32_t start; uint32_t end; } arc_trampoline_t; static arc_trampoline_t arc_trampoline_table[1024]; void arc_initialize_trampoline(void) { uint32_t i; for (i = 0; i < 1024; i++) { arc_trampoline_table[i].start = 0; arc_trampoline_table[i].end = 0; } } void arc_add_trampoline(uint32_t start, uint32_t end) { uint32_t i; for (i = 0; i < 1024; i++) { if (arc_trampoline_table[i].start == 0) { arc_trampoline_table[i].start = start; arc_trampoline_table[i].end = end; break; } } } void arc_remove_trampoline(uint32_t start, uint32_t end) { uint32_t i; for (i = 0; i < 1024; i++) { if (arc_trampoline_table[i].start == start && arc_trampoline_table[i].end == end) { arc_trampoline_table[i].start = 0; arc_trampoline_table[i].end = 0; break; } } }" GCC,arc,304,"arc_initial_elimination_offset (rtx_insn *insn, int *offset) { rtx_insn *prev_insn; rtx_insn *next_insn; rtx_insn *target_insn; rtx_insn *replacement_insn; int target_offset; int replacement_offset; prev_insn = insn->prev; next_insn = insn->next; target_insn = insn; target_offset = 0; while (target_insn != NULL && target_insn->code != CODE_LABEL) { target_offset += target_insn->length; target_insn = target_insn->next; } replacement_insn = insn; replacement_offset = 0; while (replacement_insn != NULL && replacement_insn->code != CODE_LABEL) { replacement_offset += replacement_insn->length; replacement_insn = replacement_insn->next; } *offset = target_offset - replacement_offset; }" GCC,arc,305,arc_init_expanders (void) { set_expanders (arc_expanders); } GCC,arc,306, GCC,arc,307,void arc_init_reg_tables(void) { uint32_t i; for (i = 0; i < 32; i++) { arc_mode_class[i] = (i == 0) ? 4 : (i == 1) ? 8 : (i == 2) ? 16 : (i == 3) ? 
32 : 0; } } GCC,arc,308,"arc_insn_cost (rtx_insn *insn) { int cost = 0; cost += arc_rtx_cost (insn); for (int i = 0; i < XVECLEN (insn, 0); i++) { rtx set = XVECEXP (insn, 0, i); if (GET_CODE (set) == SET) cost += arc_set_cost (set); } for (int i = 0; i < XVECLEN (insn, 1); i++) { rtx set_src = XVECEXP (insn, 1, i); if (GET_CODE (set_src) == SET_SRC) cost += arc_set_src_cost (set_src); } for (int i = 0; i < XVECLEN (insn, 2); i++) { rtx set_dst = XVECEXP (insn, 2, i); if (GET_CODE (set_dst) == SET_DST) cost += arc_set_dst_cost (set_dst); } return cost; }" GCC,arc,309,"arc_internal_label (const char *prefix, int num) { fprintf (asm_out_file, ""\t.%s%d:\n"", prefix, num); }" GCC,arc,310,"arc_invalid_within_doloop (rtx_insn *insn) { if (INSN_CODE (insn) == CODE_FOR_call) return ""Function call in the loop.""; else return NULL; }" GCC,arc,311,"arc_in_small_data_p (tree decl) { if (TREE_CODE (decl) != VAR_DECL) return false; if (DECL_SECTION_NAME (decl) == NULL_TREE) return false; const char *section_name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl)); if (strcmp (section_name, "".sdata"") == 0 || strcmp (section_name, "".sbss"") == 0) return true; return false; }" GCC,arc,312,"arc_is_aux_reg_p (rtx pat) { if (GET_CODE (pat) == REG) { rtx reg = pat; if (REGNO (reg) >= AUX_REG_FIRST && REGNO (reg) <= AUX_REG_LAST) return TRUE; } return FALSE; } ``` This function uses the `GET_CODE` macro to check if the given pattern is a register, and then checks if the register number is within the range of aux-regs. If it is, the function returns TRUE, otherwise it returns FALSE. You can use this function in your GCC backend to check if a pattern is accessing an aux-reg. For example: ``` if (arc_is_aux_reg_p (pat)) { }" GCC,arc,313,"int arc_is_jli_call_p (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL) { if (lookup_attribute (""jli_always"", DECL_ATTRIBUTES (decl)) || lookup_attribute (""jli_fixed"", DECL_ATTRIBUTES (decl))) { return 1; } } return 0; }" GCC,arc,314,arc_is_longcall_p (rtx_insn *insn) { tree sym_ref = SYM_REF_DECL (insn); if (DECL_LONG_CALL_P (sym_ref) || TARGET_LONG_CALLS) return true; if (DECL_SHORT_CALL_P (sym_ref) || DECL_MEDIUM_CALL_P (sym_ref)) return false; return true; } GCC,arc,315,"int arc_is_secure_call_p (const char *symbol) { if (symbol == NULL) return 0; const char *target_specific_value = ""arc,secure_call""; if (strstr (symbol, target_specific_value) != NULL) return 1; return 0; }" GCC,arc,316,arc_is_shortcall_p (rtx_insn *insn) { if (GET_CODE (insn) != CALL_INSN) return false; rtx_insn *call_insn = insn; rtx_insn *next_insn = NEXT_INSN (insn); if (GET_CODE (next_insn) == BARRIER) return false; if (GET_CODE (next_insn) == JUMP_INSN) return false; if (GET_CODE (next_insn) == CALL_INSN) return false; if (GET_CODE (next_insn) == INSN) return false; return true; } GCC,arc,317,"arc_is_uncached_mem_p (rtx pat) { if (GET_CODE (pat) == MEM) { rtx addr = XEXP (pat, 0); if (GET_CODE (addr) == REG) { unsigned int regno = REGNO (addr); if (regno >= FIRST_UNCACHED_REG && regno <= LAST_UNCACHED_REG) return true; } } return false; }" GCC,arc,318,"arc_label_align (rtx_insn *insn, int align) { if (INSN_ADDRESS (insn) % align == 0) return; rtx_insn *nop = emit_insn_before (gen_nop (), insn); INSN_ADDRESS (nop) = INSN_ADDRESS (insn) - 1; INSN_ADDRESS (insn) = INSN_ADDRESS (nop) + 1; }" GCC,arc,319,"arc_legitimate_constant_p (rtx x) { if (!CONSTANT_P (x)) return false; if (GET_CODE (x) == CONST_STRING && strcmp (XSTR (x, 0), ""arc,0,0,0,1,1,unrecognized supposed constant"") == 0) return true; 
if (arc_legitimate_constant_p_1 (x)) return true; return false; } static bool arc_legitimate_constant_p_1 (rtx x) { if (GET_CODE (x) == CONST_INT && INTVAL (x) >= 0 && INTVAL (x) <= 255) return true; return false; }" GCC,arc,320,"arc_legitimate_pic_addr_p (rtx addr) { if (GET_CODE (addr) != CONST) return false; if (GET_CODE (XEXP (addr, 0)) != UNSPEC) return false; if (XINT (XEXP (addr, 0), 1) != ARC_UNSPEC_GOTOFF && XINT (XEXP (addr, 0), 1) != ARC_UNSPEC_GOT) return false; if (GET_CODE (XEXP (addr, 1)) != SYMBOL_REF && GET_CODE (XEXP (addr, 1)) != LABEL_REF) return false; return true; }" GCC,arc,321,"arc_legitimize_address_0 (rtx x, machine_mode mode, int strict_p) { if (GET_CODE (x) == REG) return x; if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == REG && GET_CODE (XEXP (x, 1)) == CONST_INT) { HOST_WIDE_INT offset = INTVAL (XEXP (x, 1)); if (offset >= 0 && offset < 256) return x; } if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == REG && GET_CODE (XEXP (x, 1)) == CONST_INT) { HOST_WIDE_INT offset = INTVAL (XEXP (x, 1)); if (offset >= 0 && offset < 256) return x; } return NULL_RTX; }" GCC,arc,322,"arc_legitimize_pic_address (rtx orig, rtx oldx) { rtx addr = orig; if (GET_CODE (addr) == REG) return addr; if (GET_CODE (addr) == SYMBOL_REF) { rtx reg = gen_reg_rtx (Pmode); emit_move_insn (reg, addr); return reg; } if (CONSTANT_P (addr)) { rtx reg = gen_reg_rtx (Pmode); emit_move_insn (reg, addr); return reg; } if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && CONSTANT_P (XEXP (addr, 1))) { rtx reg = gen_reg_rtx (Pmode); emit_move_insn (reg, addr); return reg; }" GCC,arc,323,"arc_legitimize_tls_address (rtx addr, enum tls_model model) { if (model == TLS_MODEL_ARC1) { addr = plus_constant (addr, ARC_TLS_BLOCK_OFFSET); } else if (model == TLS_MODEL_ARC2) { addr = plus_constant (addr, ARC_TLS_BLOCK_OFFSET); addr = plus_constant (addr, ARC_TLS_SYMBOL_OFFSET); } else { } return addr; }" GCC,arc,324,"arc_lra_p (void) { return TARGET_LRA; } ``` This function simply returns the value of the `TARGET_LRA` macro, which is defined in the ARC target configuration file. If the `TARGET_LRA` macro is defined, then the LRA pass is used instead of the reload pass. You can use this function in your GCC plugin to determine whether the LRA pass is used for the ARC target. 
For example: ``` if (arc_lra_p()) { } else { }" GCC,arc,325," enum reg_class rclass, int in) { int cost = 0; if (TARGET_MEMORY_MOVE_COST == 2) { if (GET_MODE_SIZE (mode) <= 2) cost = 2; else if (GET_MODE_SIZE (mode) <= 4) cost = 3; else cost = 6; } else if (TARGET_MEMORY_MOVE_COST == 6) { cost = 6; }" GCC,arc,326,arc_modes_tieable_p (machine_mode mode) { switch (mode) { case QImode: case HImode: case SImode: return true; default: return false; } } GCC,arc,327,"arc_mode_dependent_address_p (rtx addr) { if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && GET_CODE (XEXP (addr, 1)) == CONST_INT) { if (INTVAL (XEXP (addr, 1)) % GET_MODE_SIZE (GET_MODE (addr)) == 0) return true; } return false; }" GCC,arc,328,int arc_must_save_register(int regno) { switch (regno) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: case 28: case 29: case 30: case 31: return 1; default: return 0; } } GCC,arc,329, GCC,arc,330, GCC,arc,331,arc_no_speculation_in_delay_slots_p (void) { if (TARGET_ARC) { if (TARGET_SPECULATION_IN_DELAY_SLOTS) return false; else return true; } else return false; } GCC,arc,332,"int arc_output_addsi (rtx *operands, int cond_p, int output_p) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; if (cond_p) { emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_PLUS (SImode, op1, op2))); return 1; } else { emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_PLUS (SImode, op1, op2))); return 0; } }" GCC,arc,333,"void arc_output_libcall(const char *fname) { printf(""arc,64,35,\""add r12,pcl,@%s@pcl\n\tjl%%!%%* [r12]\"",\""jl%%! @%s\"",\""bl%%!%%* @%s\"""", fname, fname, fname); } int main() { arc_output_libcall(""FNAME""); return 0; }" GCC,arc,334,"void arc_output_mi_thunk (int delta, void *function) { asm volatile (""\tld\t%s, [%s]\n"" ""\tadd\t%s, %s, %s\n"" ""\tld\t%s, [%s]\n"" ""\tadd\t%s, %s, %s\n"" ""\tj\t[%s]\n"" : ""=r"" (delta), ""=r"" (function) : ""0"" (delta), ""1"" (function) : ""memory""); }" GCC,arc,335,"void arc_pass_by_reference(void *arg, size_t size) { if (size > 8) { void *ref = arg; __asm__ volatile(""mov %0, %1"" : ""=r""(ref) : ""r""(arg)); } else { __asm__ volatile(""mov %0, %1"" : ""=r""(arg) : ""r""(arg)); } }" GCC,arc,336,"void arc_post_atomic_barrier (int model) { switch (model) { case ARC_POST_ATOMIC_BARRIER_MODEL_ARC: __asm__ volatile ("""" ::: ""memory""); break; case ARC_POST_ATOMIC_BARRIER_MODEL_ARC_V2: __asm__ volatile ("""" ::: ""memory""); break; case ARC_POST_ATOMIC_BARRIER_MODEL_ARC_V3: __asm__ volatile ("""" ::: ""memory""); break; default: break; } }" GCC,arc,337, GCC,arc,338,"arc_preferred_simd_mode (enum machine_mode mode, enum machine_mode *mode_out) { if (mode == VOIDmode || mode == BLKmode) return mode; if (mode == QImode || mode == HImode) return SImode; if (mode == SImode) return V4SImode; if (mode == DImode) return V2DImode; if (mode == TImode) return V2TImode; if (mode == OImode) return V4OImode; if (mode == CImode) return V4CImode; if (mode == XImode) return V4XImode; if (mode == PImode) return V4PImode; if (mode == HFmode) return V2HFmode; if (mode == SFmode) return V4SFmode; if (mode == DFmode) return V2DFmode; if (mode == XFmode) return V2XFmode; if (mode == TFmode) return V2TFmode; if (mode == SCmode) return V4SCmode; if (mode == DCmode) return V2DCmode; if (mode == XCmode) return V2XCmode; if (mode == TCmode) return V2TCmode; 
return mode; }" GCC,arc,339,"void arc_pre_atomic_barrier(uint32_t model) { switch (model) { case ARC_MODEL_ARC700: __asm__ volatile(""sync"" ::: ""memory""); break; case ARC_MODEL_ARC600: __asm__ volatile(""sync"" ::: ""memory""); break; case ARC_MODEL_ARC500: __asm__ volatile(""sync"" ::: ""memory""); break; default: break; } }" GCC,arc,340,"void arc_print_operand_address(const char *operand) { printf(""%s"", operand); }" GCC,arc,341,"void arc_process_double_reg_moves(uint32_t *dest, uint32_t *src) { uint32_t temp; if (src[0] == 0 && src[1] == 0) { return; }" GCC,arc,342,"arc_raw_symbolic_reference_mentioned_p (rtx op, bool skip_local) { if (GET_CODE (op) == SYMBOL_REF) { if (skip_local && SYMBOL_REF_LOCAL_P (op)) return false; return true; } else if (GET_CODE (op) == UNSPEC) { if (XINT (op, 1) == UNSPEC_ARC) return false; return true; } return false; }" GCC,arc,343,"arc_register_move_cost (rtx src, rtx dest, int mode) { int cost = 0; if (REGNO (src) == REGNO (dest)) return 0; if (REGNO (src) / 100 == REGNO (dest) / 100) return 100; if (REGNO (src) / 100 != REGNO (dest) / 100) return 200; return cost; }" GCC,arc,344,"arc_regno_use_in (rtx x, int regno) { if (GET_CODE (x) == REG && REGNO (x) == regno) return x; if (GET_CODE (x) == PLUS && REGNO (XEXP (x, 0)) == regno) return XEXP (x, 0); if (GET_CODE (x) == MINUS && REGNO (XEXP (x, 0)) == regno) return XEXP (x, 0); return NULL_RTX; }" GCC,arc,345, GCC,arc,346,"void arc_restore_callee_saves(void) { __asm__ volatile(""ld %0, [%1]"" : ""=r"" (reg) : ""r"" (®)); __asm__ volatile(""ldd %0, [%1]"" : ""=r"" (reg) : ""r"" (®)); } ``` This function uses the `volatile` keyword to ensure that the assembly code is not optimized away by the compiler. The `ld` and `ldd` instructions are used to restore the values of the call-saved registers. The `arc,0,0,0,0,0,0,2,0,1ULL,0,1ULL,1,0,1ULL,0,0,1,4,4,0,0` target specific value is used to specify the registers that need to be restored. The `arc` instruction is used to specify the registers that need to be restored, and the `0` values are used to indicate that the registers should be restored to their original values. 
Here's an example of how this function could be used: ``` int main(void) { arc_restore_callee_saves(); int a = 1; int b = 2; int c = a + b; return c; }" GCC,arc,347,int arc_return_address_register(TYPE type) { switch (type) { case ARC_RETURN_ADDRESS_REGISTER: return 0; case ARC_RETURN_ADDRESS_REGISTER_2: return 1; case ARC_RETURN_ADDRESS_REGISTER_3: return 2; case ARC_RETURN_ADDRESS_REGISTER_4: return 3; default: return -1; } } GCC,arc,348,"arc_return_addr_rtx (rtx op) { rtx return_addr = gen_rtx_REG (Pmode, ARC_RETURN_ADDR_REGNUM); if (return_addr == NULL) return NULL; return return_addr; }" GCC,arc,349,"arc_return_in_memory (rtx op, int mode, int regno) { if (mode == SImode && regno == 1) { rtx mem = gen_rtx_MEM (SImode, op); rtx reg = gen_rtx_REG (SImode, regno); emit_move_insn (reg, mem); } else if (mode == DImode && regno == 1) { rtx mem = gen_rtx_MEM (DImode, op); rtx reg = gen_rtx_REG (DImode, regno); emit_move_insn (reg, mem); } else { } }" GCC,arc,350, GCC,arc,351, GCC,arc,352,"void arc_save_callee_saves(void) { ST 0, 0, 0, 1, 2, 0, 1ULL, 0, 1ULL, 1, 0, 1ULL, 0, 0, 0; }" GCC,arc,353,bool arc_scheduling_not_expected(void) { if (arc_scheduling_pass_run) { return true; } if (arc_target_specific_value == ARC_TARGET_SPECIFIC_VALUE) { return true; } return false; } GCC,arc,354,"arc_sched_adjust_priority (rtx_insn *insn, int priority) { if (GET_CODE (insn) == SET && GET_CODE (XEXP (insn, 0)) == REG && GET_CODE (XEXP (insn, 1)) == MEM && GET_CODE (XEXP (XEXP (insn, 1), 0)) == PLUS && GET_CODE (XEXP (XEXP (XEXP (insn, 1), 0), 0)) == REG && GET_CODE (XEXP (XEXP (XEXP (insn, 1), 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (XEXP (insn, 1), 0), 1)) == 0) { priority = 100; } else if (GET_CODE (insn) == SET && GET_CODE (XEXP (insn, 0)) == REG && GET_CODE (XEXP (insn, 1)) == MEM && GET_CODE (XEXP (XEXP (insn, 1), 0)) == PLUS && GET_CODE (XEXP (XEXP (XEXP (insn, 1), 0), 0)) == REG && GET_CODE (XEXP (XEXP (XEXP (insn, 1), 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (XEXP (insn, 1), 0), 1)) == 0) { priority = 100; }" GCC,arc,355,arc_sched_issue_rate (void) { if (arc_target_info->issue_rate) { return arc_target_info->issue_rate; } else { return 1; } } GCC,arc,356, GCC,arc,357,"arc_secondary_reload_conv (rtx op, rtx *operands, int *offsets, int num_operands) { int i; for (i = 0; i < num_operands; i++) { rtx op = operands[i]; int offset = offsets[i]; if (offset >= 0x8000) { rtx mem = gen_rtx_MEM (SImode, gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 0), GEN_INT (offset))); emit_insn (gen_rtx_SET (VOIDmode, op, mem)); } else { emit_insn (gen_rtx_SET (VOIDmode, op, gen_rtx_MEM (SImode, gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 0), GEN_INT (offset))))); } } }" GCC,arc,358,"arc_sets_cc_p (rtx_insn *insn) { if (insn->code == ARC_INSN_CODE_CC) return true; else return false; } ``` This function takes an `rtx_insn` pointer as an argument, which represents an instruction in the ARC instruction set. The function checks the instruction's opcode to see if it is a condition code instruction (i.e., has the `ARC_INSN_CODE_CC` opcode). If it is, the function returns `true`, otherwise it returns `false`. You can use this function in your GCC backend to determine if an instruction sets the condition codes. 
For example, you can use it in a function that checks the condition codes after an instruction has been executed: ``` void arc_check_cc (rtx_insn *insn) { if (arc_sets_cc_p (insn)) { } else { } }" GCC,arc,359, GCC,arc,360,"bool arc_short_comparison_p (tree comparison) { if (TREE_CODE (comparison) == ARC_COMPARISON) { tree lhs = TREE_OPERAND (comparison, 0); tree rhs = TREE_OPERAND (comparison, 1); if (TREE_CODE (lhs) == ARC_REGISTER && TREE_CODE (rhs) == ARC_REGISTER) { if (lhs == rhs) return true; } } return false; } ``` This function first checks if the `COMPARISON` argument is a `ARC_COMPARISON` node. If it is, it extracts the left-hand side (LHS) and right-hand side (RHS) operands of the comparison. If the LHS and RHS operands are both `ARC_REGISTER` nodes, it checks if they are the same register. If they are, it returns `true`. Otherwise, it returns `false`. You can use this function in your GCC backend to determine if a comparison has a short form that can accommodate an offset. For example: ``` if (arc_short_comparison_p (comparison)) { } else { }" GCC,arc,361,"void arc_short_long(const char *insn, const char *s_tmpl, const char *l_tmpl) { int arc,1; char *output; if (strcmp(insn, ""arc,1"") == 0) { arc,1 = 1; } else { arc,1 = 0; } if (arc,1) { output = l_tmpl; } else { output = s_tmpl; } printf(""%s\n"", output); } int main() { arc_short_long(""arc,1"", ""short"", ""long""); return 0; }" GCC,arc,362,"static inline void arc_split_compare_and_swap(int *ptr, int oldval, int newval) { int tmp; __asm__ __volatile__ ( ""ld.w %0, %1\n\t"" ""cmp.w %0, %2\n\t"" ""bne.w %3\n\t"" ""mov.w %4, %0\n\t"" ""st.w %0, %1\n\t"" : ""=&r"" (tmp), ""=m"" (*ptr) : ""r"" (oldval), ""r"" (newval), ""r"" (ptr) : ""memory"" ); }" GCC,arc,363,"void arc_split_ior(uint32_t mask, uint32_t rA, uint32_t rB) { uint32_t mask1, mask2, mask3; if (mask & 0xffffffff) { mask1 = mask & 0x3f; mask2 = (mask >> 6) & 0x3f; mask3 = (mask >> 12) & 0x3f; BSET_S(rA, rB, mask1); BSET_S(rA, rA, mask2); BSET_S(rA, rA, mask3); } else if ((mask & 0x3f) && ((mask >> 6) & 0x3f) == 0) { mask1 = mask & 0x3f; mask2 = (mask >> 6) & 0x3f; BSET_S(rA, rB, mask1); OR(rA, rB, mask2); } else { OR(rA, rB, mask); } }" GCC,arc,364,"arc_split_move_p (rtx op, machine_mode mode) { if (mode == DImode && TARGET_ARC) { if (TARGET_ARC_VADD2 || TARGET_ARC_LDST) return false; return true; } return false; }" GCC,arc,365,"void arc_split_mov_const(uint32_t *dest, uint32_t *src, uint32_t size) { uint32_t i, j, k; uint32_t mask = 0xffffffff; uint32_t shift = 0; for (i = 0; i < size; i++) { for (j = 0; j < 4; j++) { k = (i * 4) + j; if (k < size) { dest[k] = (src[i] >> (j * 8)) & mask; } } } }" GCC,arc,366, GCC,arc,367,"arc_store_addr_hazard_p (rtx_insn *consumer, rtx_insn *producer) { if (GET_CODE (consumer) != MEM) return false; if (GET_CODE (producer) != MEM) return false; if (XEXP (consumer, 0) == XEXP (producer, 0)) return true; return false; }" GCC,arc,368,"arc_symbol_binds_local_p (rtx x) { if (GET_CODE (x) == SYMBOL_REF) { const char *name = XSTR (x, 0); if (name[0] == '.' 
|| name[0] == '$') return true; } return false; }" GCC,arc,369,"bool arc_text_label(const char *label) { if (!label || !label[0]) { return false; } if (strstr(label, ""arc"") == NULL) { return false; } return true; }" GCC,arc,370,"void arc_tolower(uint32_t *lo, uint32_t *hi) { uint32_t temp = *lo; *lo = temp & 0xDFDFDFDF; *hi = temp >> 8; }" GCC,arc,371, GCC,arc,372,static inline uint32_t arc_unspec_offset(uint32_t offset) { return offset + 1; } GCC,arc,373,arc_use_anchors_for_symbol_p (rtx symbol) { if (GET_MODE_SIZE (GET_MODE (symbol)) <= UNITS_PER_WORD) return false; if (GET_CODE (symbol) == PC) return false; if (GET_CODE (symbol) == TLS) return false; return true; } GCC,arc,374,arc_use_by_pieces_infrastructure_p (void) { if (TARGET_ARC) { if (TARGET_BY_PIECES_INFRASTRUCTURE) return true; } return false; } GCC,arc,375,arc_vector_mode_supported_p (machine_mode mode) { if (mode == VOIDmode) return false; switch (mode) { case QImode: case HImode: case SImode: case DImode: case TImode: case OImode: case XImode: return true; default: return false; } } GCC,arc,376,"int arc_verify_short(rtx_insn *insn, int unalign, int check_attr) { if (unalign == 0 && check_attr > 0) return 1; if (unalign != 0 && check_attr <= 0) return 0; return check_attr; }" GCC,arc,377,"arc_warn_func_return (rtx_insn *insn) { if (TREE_CODE (DECL_RESULT (current_function_decl)) != VOID_TYPE) { if (TREE_CODE (insn) == RETURN_EXPR) { if (TREE_OPERAND (insn, 0) != NULL_TREE) { if (TREE_TYPE (TREE_OPERAND (insn, 0)) != TREE_TYPE (DECL_RESULT (current_function_decl))) warning (OPT_Wreturn_type, ""function returns a value that is not the same as its return type""); } } } }" GCC,arc,378,int arc_write_ext_corereg(rtx insn) { if (GET_CODE(insn) == INSN && GET_MODE(insn) == QImode) { rtx dest = SET_DEST(insn); if (GET_CODE(dest) == REG && REGNO(dest) >= 32 && REGNO(dest) <= 61) { return 1; } } return 0; } GCC,arc,379,"int branch_dest(rtx_insn *insn) { rtx_insn *dest; dest = get_attr_dest(insn); if (dest && GET_CODE(dest) == CODE_LABEL) { dest = XEXP(dest, 0); } return dest; } ``` This function takes an `rtx_insn` pointer as an argument, which represents the branch instruction. It then uses the `get_attr_dest` function to get the destination address of the branch instruction. If the destination address is a branch instruction, it gets the address of the branch instruction using the `XEXP` function. Finally, it returns the destination address of the branch instruction. You can use this function in your ARC backend of the GCC compiler to get the destination address of a branch instruction. For example, you can call this function in the `arc_expand_branch` function to get the destination address of a branch instruction and then use it to generate the appropriate branch instruction. 
Here is an example of how you can use this function in the `arc_expand_branch` function: ``` void arc_expand_branch(rtx_insn *insn) { int dest; dest = branch_dest(insn); if (dest) { emit_jump_insn(gen_jump(dest)); } else { emit_jump_insn(gen_jump(insn)); } }" GCC,arc,380,"bool check_if_valid_regno_const(int opno) { if (opno >= 0 && opno < 16) { return true; } else { error(""register number must be a compile-time constant""); return false; } }" GCC,arc,381,"check_store_cacheline_hazard (rtx_insn *insn) { rtx_insn *prev_insn; rtx_insn *next_insn; int num_loads; if (GET_CODE (insn) != SET || GET_CODE (SET_DEST (insn)) != MEM) return; prev_insn = PREV_INSN (insn); next_insn = NEXT_INSN (insn); if (prev_insn && GET_CODE (prev_insn) == SET && GET_CODE (SET_SRC (prev_insn)) == MEM) { if (next_insn && GET_CODE (next_insn) == SET && GET_CODE (SET_SRC (next_insn)) == MEM) { num_loads = 2; if (num_loads > arc_get_num_stores (insn)) { emit_insn_after (gen_nop (), insn); } } } }" GCC,arc,382," int unsignedp, int volatilep) { if (GET_CODE (op) == MEM) { rtx addr = XEXP (op, 0); if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == REG && REGNO (XEXP (addr, 0)) == SDA_BASE_REGNUM && GET_CODE (XEXP (addr, 1)) == CONST_INT) { HOST_WIDE_INT offset = INTVAL (XEXP (addr, 1)); if (offset >= 0 && offset < 4096) { fputs (""["", asm_out_file); output_addr_const (asm_out_file, addr); fputs (""]"", asm_out_file); return; } } } output_operand_lossage (""invalid memory operand for `compact_sda_memory_operand'""); }" GCC,arc,383,"conditionalize_nonjump (rtx insn, rtx cond, bool annulled) { rtx pat = PATTERN (insn); rtx new_pat; if (annulled) { new_pat = gen_rtx_COND (VOIDmode, cond, gen_rtx_ANNUL (VOIDmode, pat), gen_rtx_ANNUL (VOIDmode, pat)); } else { new_pat = gen_rtx_COND (VOIDmode, cond, gen_rtx_ANNUL (VOIDmode, pat), pat); } return new_pat; }" GCC,arc,384,"disi_highpart (rtx in) { rtx out; if (GET_MODE (in) == DImode) { out = gen_rtx_REG (SImode, arc_0); emit_move_insn (out, gen_rtx_HIGHPART (SImode, in)); } else { out = in; } return out; }" GCC,arc,385,"void emit_shift(rtx op0, rtx op1, rtx op2, int code) { switch (code) { case ARC_SHIFT_LEFT: emit_insn(gen_ashl(op0, op1, op2)); break; case ARC_SHIFT_RIGHT: emit_insn(gen_ashr(op0, op1, op2)); break; case ARC_SHIFT_UNSIGNED_RIGHT: emit_insn(gen_lshr(op0, op1, op2)); break; default: error(""Invalid shift code""); break; } }" GCC,arc,386,"emit_unlikely_jump (rtx_insn *insn) { if (GET_CODE (insn) == JUMP_INSN) { rtx_insn *prev_insn = PREV_INSN (insn); if (prev_insn && GET_CODE (prev_insn) == JUMP_INSN) { rtx_insn *target = JUMP_LABEL (prev_insn); if (target && GET_CODE (target) == LABEL_REF) { rtx_insn *label = as_a (XEXP (target, 0)); if (label && GET_CODE (label) == CODE_LABEL) { rtx_insn *jump = emit_jump_insn (gen_rtx_JUMP (VOIDmode, label)); RTX_FRAME_RELATED_P (jump) = 1; emit_barrier (); } } } } }" GCC,arc,387,"void execute(void) { arc_init(); arc_optimize(); arc_generate_code(); arc_cleanup(); } ``` This function would be called by the GCC compiler to execute the ARC backend pass. It would initialize the target specific values, optimize the code, generate the code, and then clean up any resources used by the pass. You can also add some error handling and logging to the function to make it more robust. 
``` void execute(void) { arc_init(); arc_optimize(); arc_generate_code(); arc_cleanup(); if (arc_error_occurred()) { arc_print_error(); arc_cleanup(); return; } arc_print_success(); }" GCC,arc,388,"void frame_add (rtx reg, rtx offset) { emit_insn (gen_frame_adjust (reg, offset)); }" GCC,arc,389,frame_insn (rtx_insn *insn) { emit_insn (insn); insn->frame_related = 1; insn->emit_frame_insn = 1; } GCC,arc,390,"void frame_move(rtx src, rtx dst) { rtx insn = gen_frame_move(src, dst); emit_insn(insn); }" GCC,arc,391,"frame_move_inc (rtx dest, rtx src, rtx addr, int offset) { if (REG_P (dest) && REG_P (src) && REG_INC (src)) { if (ADDR_P (addr) && (ADDR_AUTOINC (addr) || ADDR_ZERO (addr))) { rtx note = gen_rtx_REG_INC (VOIDmode, REGNO (dest)); REG_NOTES (dest) = alloc_EXPR_LIST (REG_INC, note, REG_NOTES (dest)); } } frame_move (dest, src, addr, offset); }" GCC,arc,392,"frame_restore_reg (rtx reg, HOST_WIDE_INT offset) { rtx mem; if (offset == 0) return; if (offset < 0) { emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-offset))); offset = -offset; } mem = gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, offset)); emit_move_insn (reg, mem); }" GCC,arc,393,"void frame_save_reg (int reg, int offset) { emit_insn (gen_frame_store (reg, offset)); }" GCC,arc,394,"frame_stack_add (HOST_WIDE_INT offset) { rtx insn; insn = emit_insn (gen_frame_stack_add (offset)); emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (offset))); add_insn (insn, get_insns ()); }" GCC,arc,395,"gen_acc1 (void) { return gen_rtx_REG (SImode, arc, 56); }" GCC,arc,396,"gen_acc2 (void) { rtx reg = gen_rtx_REG (arc, 57, 56); return reg; }" GCC,arc,397,"bool gen_operands_ldd_std(int operands[], bool load) { if (operands[0] != operands[2] || operands[1] != operands[3]) { return false; } if (operands[2] - operands[0] != 4 || operands[3] - operands[1] != 4) { return false; } if (load) { operands[0] = operands[2]; operands[1] = operands[3]; } else { operands[2] = operands[0]; operands[3] = operands[1]; } return true; }" GCC,arc,398, GCC,arc,399,"void hwloop_fail(rtx_insn *insn, rtx_insn *loop_end) { rtx_insn *subtract = gen_rtx_SUB(VOIDmode, loop_end, insn); rtx_insn *test = gen_rtx_TEST(VOIDmode, subtract); emit_insn_before(subtract, loop_end); emit_insn_before(test, loop_end); loop_end->set_operand(0, test); }" GCC,arc,400,hwloop_pattern_reg (rtx insn) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == REG && GET_CODE (SET_DEST (PATTERN (insn))) == REG && REG_LOOP_COUNTER_P (SET_SRC (PATTERN (insn))) && REG_LOOP_COUNTER_P (SET_DEST (PATTERN (insn)))) { return SET_SRC (PATTERN (insn)); } return NULL_RTX; } GCC,arc,401,"static void irq_range(const char *range_str, uint32_t *start, uint32_t *end) { char *dash = strchr(range_str, '-'); if (!dash) { fprintf(stderr, ""missing dash in range string\n""); exit(1); } char *first_reg_str = range_str; char *last_reg_str = dash + 1; uint32_t first_reg = arc_reg_name_to_num(first_reg_str); uint32_t last_reg = arc_reg_name_to_num(last_reg_str); if (first_reg == ARC_REG_R0) { fprintf(stderr, ""first register must be R0\n""); exit(1); } if (last_reg == ARC_REG_FP) { fprintf(stderr, ""last register must be an odd register\n""); exit(1); } if (first_reg > last_reg) { fprintf(stderr, ""invalid range: %s-%s\n"", first_reg_str, last_reg_str); exit(1); } *start = first_reg; *end = last_reg; } int main(int argc, char **argv) { if (argc != 2) { 
fprintf(stderr, ""usage: %s \n"", argv[0]); return 1; }" GCC,arc,402,void jli_call_scan(void) { struct function *func = get_current_function(); struct instruction *call_insn = get_first_call_insn(func); while (call_insn) { struct function *called_func = get_called_function(call_insn); if (called_func->section == JLI_SECTION) { add_symbol_to_jli_section(called_func); } call_insn = get_next_call_insn(call_insn); } } GCC,arc,403," addr_space_t as ATTRIBUTE_UNUSED) { if (GET_CODE (addr) == PLUS) { rtx base = XEXP (addr, 0); rtx offset = XEXP (addr, 1); if (GET_CODE (base) == REG && GET_CODE (offset) == REG) return true; if (GET_CODE (base) == REG && GET_CODE (offset) == CONST_INT) { HOST_WIDE_INT val = INTVAL (offset); if (val >= 0 && val < 4096) return true; } if (GET_CODE (base) == REG && GET_CODE (offset) == CONST_DOUBLE) { HOST_WIDE_INT val = CONST_DOUBLE_LOW (offset); if (val >= 0 && val < 4096) return true; } } return false; }" GCC,arc,404,"legitimate_scaled_address_p (rtx op) { if (GET_CODE (op) != MEM) return false; rtx addr = XEXP (op, 0); if (GET_CODE (addr) != PLUS) return false; rtx base = XEXP (addr, 0); if (GET_CODE (base) != REG) return false; rtx index = XEXP (addr, 1); if (GET_CODE (index) != REG) return false; if (GET_MODE (base) != GET_MODE (index)) return false; if (GET_MODE_SIZE (GET_MODE (base)) != 4) return false; if (GET_MODE_SIZE (GET_MODE (index)) != 4) return false; if (GET_CODE (XEXP (index, 0)) != CONST_INT) return false; HOST_WIDE_INT scale = INTVAL (XEXP (index, 0)); if (scale != 1 && scale != 2 && scale != 4 && scale != 8) return false; return true; }" GCC,arc,405, GCC,arc,406,"mem_ok_for_ldd_std (rtx mem, rtx *base, rtx *offset) { if (GET_CODE (mem) != MEM) return false; rtx addr = XEXP (mem, 0); if (GET_CODE (addr) != PLUS) return false; rtx base_reg = XEXP (addr, 0); if (GET_CODE (base_reg) != REG) return false; rtx offset_imm = XEXP (addr, 1); if (GET_CODE (offset_imm) != CONST_INT) return false; *base = base_reg; *offset = offset_imm; return true; }" GCC,arc,407,next_nonnote_insn_bb (rtx insn) { while (insn && !NOTE_INSN_P (insn)) { if (BLOCK_FOR_INSN (insn) != current_block) return NULL_RTX; insn = NEXT_INSN (insn); } return insn; } GCC,arc,408,static bool operands_ok_ldd_std(rtx *operands) { if (!REG_P(operands[0]) || !REG_P(operands[1])) return false; if (GET_CODE(operands[2]) != REG || GET_MODE(operands[2]) != SImode) return false; if (GET_CODE(operands[3]) != CONST_INT || (INTVAL(operands[3]) < -4 || INTVAL(operands[3]) > 4)) return false; if (GET_CODE(operands[4]) != CONST_INT || (INTVAL(operands[4]) != 4)) return false; if (reload_completed && !LEGAL_REG_P(operands[0]) && !LEGAL_REG_P(operands[1])) return false; return true; } GCC,arc,409, GCC,arc,410,"output_short_suffix (rtx insn) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_SRC (PATTERN (insn))) == REG && GET_CODE (SET_DEST (PATTERN (insn))) == REG && GET_MODE (SET_SRC (PATTERN (insn))) == GET_MODE (SET_DEST (PATTERN (insn))) && GET_MODE_SIZE (GET_MODE (SET_SRC (PATTERN (insn)))) <= 2) { fputs (""_s"", asm_out_file); cfun->machine->unalign = 1; } }" GCC,arc,411,"void pad_return(void) { if (__builtin_return_address() - __builtin_return_address() == 6) { __asm__(""nop""); } } ``` This function uses the `__builtin_return_address` intrinsic to get the return address of the current function, and then checks if it is 6 bytes away from the current address. If it is, it adds a `nop` instruction to the code to pad the return address and avoid a mispredict. 
You can use this function in your ARC backend code by calling it at the beginning of each function that needs to avoid a mispredict. For example: ``` void foo(void) { pad_return(); }" GCC,arc,412,"parse_mrgf_banked_regs_option (const char *option) { int num; if (sscanf (option, ""%d"", &num) != 1) error (""invalid number in %<-mrgf-banked-regs=%s%>"", option); if (num != 4 && num != 8 && num != 16 && num != 32) error (""valid values are 0, 4, 8, 16, or 32""); arc_mrgf_banked_regs = num; }" GCC,arc,413,"static inline void pop_reg(void) { __asm__ volatile(""mov %0, %1"" : ""=r""(arc) : ""r""(1)); }" GCC,arc,414, GCC,arc,415,"void split_addsi(int a, int b, int c, int d) { int e = a + b; int f = c + d; int g = e + f; return g; }" GCC,arc,416,"split_subsi (rtx *operands, int *len) { rtx op0 = operands[0]; rtx op1 = operands[1]; rtx op2 = operands[2]; rtx op3 = operands[3]; rtx op4 = operands[4]; if (GET_CODE (op1) == CONST_INT && INTVAL (op1) >= 0 && INTVAL (op1) <= 255) { emit_insn (gen_subsi3 (op0, op1, op2)); *len = 1; } else { rtx reg = gen_reg_rtx (SImode); emit_insn (gen_movsi (reg, op1)); emit_insn (gen_subsi3 (op0, reg, op2)); *len = 2; } }" GCC,arc,417,"symbolic_reference_mentioned_p (rtx op) { if (GET_CODE (op) == SYMBOL_REF) return true; if (GET_CODE (op) == CONST) { rtx inner = XEXP (op, 0); if (GET_CODE (inner) == SYMBOL_REF) return true; } return false; } ``` This function uses the `GET_CODE` macro to check the type of the operand `OP`. If the operand is a `SYMBOL_REF`, the function returns `true`. If the operand is a `CONST`, the function checks the type of the inner operand `XEXP (op, 0)`. If the inner operand is a `SYMBOL_REF`, the function returns `true`. Otherwise, the function returns `false`. You can use this function in your GCC backend to check if an operand contains a symbol reference. For example: ``` if (symbolic_reference_mentioned_p (operand)) { }" GCC,arc,418,"static void workaround_arc_anomaly(void) { if (arc_hazard_p()) { emit_insn(gen_nop()); } } ``` This function checks if the `arc_hazard` is present, and if it is, it inserts a `NOP` instruction to ensure that the branch is not shortened. The `arc_hazard` is a target-specific value that indicates the presence of a hazard in the ARC architecture. 
You can call this function in the machine reorg phase of the GCC compiler by adding the following code to your machine reorg pass: ``` void machine_reorg(void) { workaround_arc_anomaly(); }" GCC,arc,419,"char* arc_cpu_to_as(const char* cpu) { if (strcmp(cpu, ""arcem"") == 0) { return ""-mcode-density -mcpu=arcem""; } else if (strcmp(cpu, ""archs"") == 0) { return ""-mcode-density -mcpu=archs""; } else if (strcmp(cpu, ""nps400"") == 0) { return ""-mcode-density -mcpu=nps400 -mEA""; } else if (strcmp(cpu, ""arc700"") == 0) { return ""-mcode-density -mcpu=arc700 -mEA""; } else if (strcmp(cpu, ""arc600"") == 0) { return ""-mcode-density -mcpu=arc600 -mmul64 -mnorm""; } else if (strcmp(cpu, ""arc600-dsp-packa"") == 0) { return ""-mcode-density -mcpu=arc600 -mdsp-packa -mnorm""; } else { return NULL; } }" LLVM,NVPTX,0,"const char* getPassName(const Pass* pass) { if (pass->getType() == Pass::NVPTX) { return ""Add implicit SYCL global offset""; } else { return pass->getName(); } }" LLVM,NVPTX,1,"void runOnModule(Module &M) override { auto NVPTX = M.getTargetSpecificValue(); for (auto &F : M.functions()) { for (auto &I : F.instructions()) { if (I.isIntrinsic()) { auto IntrinsicName = I.getIntrinsicName(); if (IntrinsicName == ""nvvm_implicit_offset"") { auto IntrinsicType = I.getIntrinsicType(); if (IntrinsicType != NVPTX.getImplicitOffsetType()) { M.getContext().emitError(""Intrinsic::nvvm_implicit_offset does not return the expected type""); } } } } } }" LLVM,NVPTX,2,"void runOnModule(Module &M) override { const TargetTriple &TT = M.getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return; } MDNode *Annotations = M.getOrInsertNamedMetadata(""nvvm.annotations""); for (Function &F : M.getFunctionList()) { if (F.hasFnAttribute(""kernel"")) { MDNode *Annotations = F.getOrInsertNamedMetadata(""nvvm.annotations""); Annotations->addOperand(MDString::get(M.getContext(), ""NVPTX"")); Annotations->addOperand(MDString::get(M.getContext(), ""nvvm.annotations"")); Annotations->addOperand(MDString::get(M.getContext(), ""IR compiled to PTX must have nvvm.annotations"")); Annotations->addOperand(MDString::get(M.getContext(), ""3"")); Annotations->addOperand(MDString::get(M.getContext(), ""1"")); Annotations->addOperand(MDString::get(M.getContext(), ""kernel"")); Annotations->addOperand(MDString::get(M.getContext(), ""0"")); Annotations->addOperand(MDString::get(M.getContext(), ""0"")); } } }" LLVM,NVPTX,3,"const char *getPassName(const Pass *P) { if (P->getTarget() == ""NVPTX"") { if (P->getTargetSpecificValue() == ""localaccessortosharedmemory"") { return ""Local Accessor to Shared Memory""; } } return P->getName(); }" LLVM,NVPTX,4,"void runOnModule(Module &M) override { TargetSpecificValue *TSValue = M.getTargetSpecificValue(""nvvm.annotations"", 3, 1, ""kernel"", 0, 0); if (!TSValue) return; Function *KernelFunc = TSValue->getFunction(); if (!KernelFunc) return; }" LLVM,NVPTX,5,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,6,"const char *getPassName(const Pass *P) { if (P->getType() == NVPTX) { return ""NVPTX specific alloca hoisting""; } else { return P->getName(); } }" LLVM,NVPTX,7,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,8,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,9,"const char *getPassName(const Pass *P) { if (P->getType() == NVPTX) { return ""NVPTX specific alloca hoisting""; } else { 
return P->getName(); } }" LLVM,NVPTX,10,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,11,void getAnalysisUsage(AnalysisUsage &AU) { NVPTXBackendBase::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); } LLVM,NVPTX,12,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,13,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,14,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,15,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,16,void getAnalysisUsage(AnalysisUsage &AU) { NVPTXBackendBase::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); } LLVM,NVPTX,17,"const char *getPassName(const Pass *P) { if (P->getType() == NVPTX) { return ""NVPTX specific alloca hoisting""; } else { return P->getName(); } }" LLVM,NVPTX,18,"void doFinalization(LLVMContext& Context, NVPTXTargetMachine& TargetMachine) { }" LLVM,NVPTX,19,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX::CUDA); AP->setFileScopeInlineAssembly(true); AP->setFileScopeInlineAssemblyStart(""Start of file scope inline assembly""); AP->setFileScopeInlineAssemblyEnd(""End of file scope inline assembly""); M->setAsmPrinter(AP); }" LLVM,NVPTX,20,"void EmitInstruction(LLVMContext &Context, Instruction *Inst) { HazardState *State = getHazardState(Inst); switch (Inst->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Div: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Load: case Instruction::Store: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Call: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Ret: State->setWarp(Inst->getParent()->getWarp()); break; default: State->setWarp(nullptr); break; } State->advance(); }" LLVM,NVPTX,21, LLVM,NVPTX,22,"define void @print(i32 %stream, i32 %value) #0 { %1 = call i32 @llvm.nvptx.print(i32 %stream, i32 %value) ret void } declare i32 @llvm.nvptx.print(i32, i32) #1 attributes #0 = { nounwind } attributes #1 = { nounwind } ``` This function takes two arguments: `%stream`, which is the stream to print to, and `%value`, which is the value to print. It calls the `@llvm.nvptx.print` function, which is a target-specific function that prints the value to the stream. The `nounwind` attribute is added to the function to indicate that it does not throw any exceptions. 
The `llvm.nvptx.print` function is defined as follows: ``` define i32 @llvm.nvptx.print(i32 %stream, i32 %value) #1 { %1 = call i32 @llvm.nvptx.print.i32(i32 %stream, i32 %value) ret i32 %1 }" LLVM,NVPTX,23,"void emitGlobals(Module *M) { std::vector globals = M->getGlobalVariables(); for (auto global : globals) { Type *type = global->getType(); uint64_t size = type->getSize(); void *memory = malloc(size); GlobalAddress[global->getName()] = memory; if (global->hasInitializer()) { Constant *initializer = global->getInitializer(); emitConstant(initializer, memory); } } }" LLVM,NVPTX,24,"define void @print(i32 %stream, i32 %value) #0 { %1 = call i32 @llvm.nvptx.print.i32(i32 %stream, i32 %value) ret void }" LLVM,NVPTX,25,"void doFinalization(LLVMContext& Context, NVPTXTargetMachine& TargetMachine) { }" LLVM,NVPTX,26,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX, NVPTX, 128, NVPTX::CUDA, 128); }" LLVM,NVPTX,27,"void emitHeader(const Module *M) { OutStream &OS = M->getContext().getOutStream(); OS << ""//\n""; OS << ""// Generated by LLVM NVPTX Back-End\n""; OS << ""//\n""; OS << ""\n""; OS << "".version 3.0\n""; OS << "".target "" << NVPTX::NVCL << "", texmode_independent"" << NVPTX::CUDA << "", map_f64_to_f32"" << "", debug\n""; OS << "".address_size "" << (M->getContext().getTargetInfo().getPointerWidth(0) == 64 ? ""64"" : ""32"") << ""\n""; OS << ""\n""; }" LLVM,NVPTX,28,"void EmitInstruction(LLVMContext &Context, Instruction *Inst) { HazardState *State = getHazardState(Inst); switch (Inst->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Div: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Load: case Instruction::Store: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Call: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Ret: State->setWarp(Inst->getParent()->getWarp()); break; default: State->setWarp(nullptr); break; } State->advance(); }" LLVM,NVPTX,29," const TargetVariant *TV) { const MachineOperand &MO = MI->getOperand(OpNum); const MachineMemOperand *MMO = dyn_cast(MO); if (!MMO) return; const TargetAddressingMode *TAM = TV->getAddressingMode(MMO->getAddrMode()); if (!TAM) return; const char *RegName = TAM->getRegName(MMO->getReg()); if (!RegName) return; const char *OffsetName = TAM->getOffsetName(MMO->getOffset()); if (!OffsetName) return; const char *ScaleName = TAM->getScaleName(MMO->getScale()); if (!ScaleName) return; const char *SegmentName = TAM->getSegmentName(MMO->getSegment()); if (!SegmentName) return; const char *BaseName = TAM->getBaseName(MMO->getBase()); if (!BaseName) return; const char *IndexName = TAM->getIndexName(MMO->getIndex()); if (!IndexName) return; const char *DispName = TAM->getDispName(MMO->getDisp()); if (!DispName) return; Out << RegName << ""["" << OffsetName << ""+"" << ScaleName << ""*"" << BaseName << ""+"" << IndexName << ""+"" << DispName << ""]""; }" LLVM,NVPTX,30,"void readLine(llvm::Value* stream) { llvm::Value* targetSpecificValue = llvm::Intrinsic::getDeclaration( stream->getContext(), llvm::Intrinsic::nvptx_readline); llvm::Value* result = llvm::Intrinsic::call( stream->getContext(), llvm::Intrinsic::nvptx_readline, {stream, targetSpecificValue}); return result; }" LLVM,NVPTX,31,"void doFinalization(Module &M) override { if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), 
{MDString::get(M.getContext(), ""llvm.dbg.cu"")})); } if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); } if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); } if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); }" LLVM,NVPTX,32,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTargetSpecificValue(""NVPTX"", ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(""NVPTX"", ""Module has a nontrivial global ctor, which NVPTX does not support.""); AP->setTargetSpecificValue(""NVPTX"", ""Module has a nontrivial global dtor, which NVPTX does not support.""); }" LLVM,NVPTX,33,"void emitBasicBlockStart(LLVMContext &Context, BasicBlock *BB) { if (BB->hasTargetSpecificValue()) { auto TargetSpecificValue = BB->getTargetSpecificValue(); if (TargetSpecificValue->getTarget() == ""NVPTX"") { Context.emit("".pragma \""nounroll\"";\n""); } } }" LLVM,NVPTX,34,"void emitFunctionBodyEnd(Function &F, MachineFunction &MF) { } ``` This function takes two arguments: `F`, which is a reference to the `Function` being compiled, and `MF`, which is a reference to the `MachineFunction` being generated. The function body is empty, but you can add target-specific code to emit stuff after the last basic block in the function. For example, if you want to emit a `ret` instruction to return from the function, you can add the following code: ``` void emitFunctionBodyEnd(Function &F, MachineFunction &MF) { MF.addInstruction(Instruction::CreateRet(MF.getContext())); }" LLVM,NVPTX,35,void emitFunctionBodyStart(Function *F) { if (F->getTarget() == NVPTX) { } } LLVM,NVPTX,36,"void emitFunctionEntryLabel(LLVMContext &Context, Function *F) { const char *TargetTriple = ""nvptx-nvidia-cuda""; const char *EntryLabel = "".entry""; const char *FuncLabel = "".func""; const char *EntryPoint = ""{\n""; Context.emitLabel(EntryLabel); Context.emitLabel(FuncLabel); Context.emit(EntryPoint); }" LLVM,NVPTX,37,"void emitGlobals(Module *M) { std::vector globals = M->getGlobalVariables(); for (auto global : globals) { Type *type = global->getType(); uint64_t size = type->getSize(); void *memory = malloc(size); GlobalAddress[global->getName()] = memory; if (global->hasInitializer()) { Constant *initializer = global->getInitializer(); emitConstant(initializer, memory); } } }" LLVM,NVPTX,38, LLVM,NVPTX,39,"void emitImplicitDef(Instruction *I, raw_ostream &OS) { if (I->getOpcode() == Instruction::IMPLICIT_DEF) { OS << ""implicit-def: "" << I->getOperand(0)->getName() << ""\n""; } }" LLVM,NVPTX,40, LLVM,NVPTX,41,"void emitStartOfAsmFile(MCStreamer &Streamer) { Streamer.emitIntValue(NVPTX, 128); }" LLVM,NVPTX,42,"Value* getFunctionFrameSymbol(Function* F) { if (!F->hasStackFrame()) { return nullptr; } if (F->getStackFrame()->isRegisterBased()) { return nullptr; } StringRef symbolName = ""__func_pseudo_stack""; Type* symbolType = Type::getInt32Ty(F->getContext()); Value* symbol = new GlobalVariable(F->getParent(), symbolType, false, GlobalValue::ExternalLinkage, nullptr, symbolName); F->setPseudoStack(symbol); return symbol; }" LLVM,NVPTX,43, LLVM,NVPTX,44," const TargetVariant *TV) { const 
MachineOperand &MO = MI->getOperand(OpNum); const MachineMemOperand *MMO = dyn_cast(MO); if (!MMO) return; const TargetAddressingMode *TAM = TV->getAddressingMode(MMO->getAddrMode()); if (!TAM) return; const char *RegName = TAM->getRegName(MMO->getReg()); if (!RegName) return; const char *OffsetName = TAM->getOffsetName(MMO->getOffset()); if (!OffsetName) return; const char *ScaleName = TAM->getScaleName(MMO->getScale()); if (!ScaleName) return; const char *SegmentName = TAM->getSegmentName(MMO->getSegment()); if (!SegmentName) return; const char *BaseName = TAM->getBaseName(MMO->getBase()); if (!BaseName) return; const char *IndexName = TAM->getIndexName(MMO->getIndex()); if (!IndexName) return; const char *DispName = TAM->getDispName(MMO->getDisp()); if (!DispName) return; Out << RegName << ""["" << OffsetName << ""+"" << ScaleName << ""*"" << BaseName << ""+"" << IndexName << ""+"" << DispName << ""]""; }" LLVM,NVPTX,45,"void PrintAsmOperand(const char *operand) { printf(""%s"", operand); }" LLVM,NVPTX,46,void runOnMachineFunction(MachineFunction &MF) { const TargetTriple &TT = MF.getTargetTriple(); MF.emitFunctionBody(TT); MF.emitFunctionEpilogue(TT); } LLVM,NVPTX,47,"void doFinalization(LLVMContext& Context, Module& M) { const Target& NVPTXTarget = Context.getTarget(); const char* NVPTXTargetName = NVPTXTarget.getName(); if (NVPTXTargetName == ""nvptx"") { M.getOrInsertNamedMetadata(""llvm.dbg.cu""); M.getOrInsertNamedMetadata(""llvm.dbg.cu.section""); M.getOrInsertNamedMetadata(""llvm.dbg.cu.section.debug_loc""); M.getOrInsertNamedMetadata(""llvm.dbg.cu.section.debug_loc.nvptx""); M.getOrInsertNamedMetadata(""llvm.dbg.cu.section.debug_loc.nvptx""); M.getOrInsertNamedMetadata(""llvm.dbg.cu.section.debug_loc.nvptx""); } }" LLVM,NVPTX,48,"void print(const char *format, ...) 
{ va_list args; va_start(args, format); nvptx::LowerConstant(format, args); va_end(args); }" LLVM,NVPTX,49,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setModule(M); AP->setNVPTX(true); AP->setNVPTXAliases(false); AP->setNVPTXGlobalCtors(false); AP->setNVPTXGlobalDtors(false); AP->setNVPTXFileScopeInlineAssembly(false); AP->setTargetSpecificValue(Target::NVPTX, ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global ctor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global dtor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""Start of file scope inline assembly""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""End of file scope inline assembly""); }" LLVM,NVPTX,50,"void emitGlobalVariable(GlobalVariable *GV, Module *M) { const char *Triple = ""nvptx-unknown-unknown""; const char *DataLayout = ""e-p:32:32-i64:64-v16:16-v32:32-n16:32:64""; const TargetMachine *TM = M->getTargetMachine(); StringRef Name = GV->getName(); Type *Ty = GV->getType(); unsigned Align = GV->getAlignment(); Constant *Init = GV->getInitializer(); TM->emitGlobalVariable(Name, Ty, Align, Init, DataLayout, Triple); }" LLVM,NVPTX,51, LLVM,NVPTX,52,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX::CUDA); AP->setFileScopeInlineAssembly(true); AP->setFileScopeInlineAssemblyStart(""Start of file scope inline assembly""); AP->setFileScopeInlineAssemblyEnd(""End of file scope inline assembly""); M->setAsmPrinter(AP); }" LLVM,NVPTX,53,"void emitGlobals(Module *M) { std::vector globals = M->getGlobalVariables(); for (auto global : globals) { Type *type = global->getType(); uint64_t size = type->getSize(); void *memory = malloc(size); GlobalAddress[global->getName()] = memory; if (global->hasInitializer()) { Constant *initializer = global->getInitializer(); emitConstant(initializer, memory); } } }" LLVM,NVPTX,54,"void emitHeader(LLVMContext &Context, NVPTXTargetMachine &TM) { std::string header = ""//\n// Generated by LLVM NVPTX Back-End\n//\n\n.version 10.10\n.target NVCL, texmode_independent CUDA, map_f64_to_f32, debug\n.address_size 64 32\n\n""; TM.emitHeader(header); }" LLVM,NVPTX,55,"void emitImplicitDef(Instruction *I, raw_ostream &OS) { if (I->getOpcode() == Instruction::IMPLICIT_DEF) { OS << ""implicit-def: "" << I->getOperand(0)->getName() << "" = "" << I->getOperand(1)->getName() << ""\n""; } }" LLVM,NVPTX,56,"void EmitInstruction(LLVMContext &Context, Instruction *Inst) { HazardState *State = getHazardState(Inst); switch (Inst->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Div: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Load: case Instruction::Store: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Call: State->setWarp(Inst->getParent()->getWarp()); break; case Instruction::Ret: State->setWarp(Inst->getParent()->getWarp()); break; default: State->setWarp(nullptr); break; } State->advance(); }" LLVM,NVPTX,57, LLVM,NVPTX,58,"define void @print(i8* %stream, i8* %operand) #0 { %1 = call i8* @nvptx::LowerConstant(i8* %operand) call void @nvptx::Print(i8* %stream, i8* %1) ret void }" LLVM,NVPTX,59,"void doFinalization(Module &M) { } ``` This function takes a `Module` object as an argument, which represents the entire module being compiled. 
It can be used to perform any necessary clean up or finalization tasks after all passes have run. The `NVPTX` target specific value is used to indicate that this function should only be called when the target architecture is NVPTX. The `0` values for the `NVPTX` and `0` arguments are used to indicate that this function should be called for all passes, regardless of the pass number. The `""//\t}""` argument is used to indicate that this function should be called after all passes have run. Here is an example of how this function can be used in the LLVM Compiler: ``` void doFinalization(Module &M) { }" LLVM,NVPTX,60,"void emitHeader(const NVPTX::NVCL &nvcl) { outs() << ""//\n"" << ""// Generated by LLVM NVPTX Back-End\n"" << ""//\n"" << ""\n"" << "".version "" << nvcl.getVersion() << ""."" << nvcl.getVersion() << ""\n"" << "".target "" << nvcl.getTarget() << "" "" << nvcl.getTarget() << "" "" << nvcl.getTarget() << ""::NVCL, texmode_independent\n"" << ""//, debug\n"" << "".address_size "" << nvcl.getAddressSize() << ""\n"" << ""\n""; }" LLVM,NVPTX,61,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXTarget &NVPTX = static_cast(T); MF.emitFunctionBody(NVPTX); } LLVM,NVPTX,62,"void emitImplicitDef(Instruction *I, raw_ostream &OS) { if (I->getOpcode() == Instruction::IMPLICIT_DEF) { OS << ""implicit-def: "" << I->getOperand(0)->getName() << "" = "" << I->getOperand(1)->getName() << ""\n""; } }" LLVM,NVPTX,63,"const char *getSymbolName(unsigned index) { switch (index) { case 0: return ""_$_""; case 1: return ""_%_""; default: return nullptr; } }" LLVM,NVPTX,64, LLVM,NVPTX,65, LLVM,NVPTX,66,"void emitImplicitDef(Instruction *I, raw_ostream &OS) { if (I->getOpcode() == Instruction::IMPLICIT_DEF) { OS << ""implicit-def: "" << I->getOperand(0)->getName() << "" = "" << I->getOperand(1)->getName() << ""\n""; } }" LLVM,NVPTX,67,"void EmitInstruction(LLVMContext& Context, Instruction* Inst) { HazardState* Hazard = Inst->getHazardState(); switch (Inst->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Div: Hazard->advance(1); break; case Instruction::And: case Instruction::Or: case Instruction::Xor: break; case Instruction::Load: case Instruction::Store: Hazard->checkHazards(Inst); break; default: break; } }" LLVM,NVPTX,68,"void doFinalization(LLVMContext& Context, Module& M) { } ``` This function takes two arguments: * `Context`: The LLVM context in which the module is being compiled. * `M`: The module being compiled. Inside the function, you can perform any necessary clean up operations, such as deallocating memory or closing files. Note that the `doFinalization` function is a virtual method that can be overridden by subclasses to provide target-specific behavior. In this case, the `NVPTX` backend of the LLVM Compiler provides a default implementation of `doFinalization` that does nothing. However, you can override this function in a subclass to provide target-specific clean up behavior. 
Here's an example of how you might override the `doFinalization` function in a subclass of the `NVPTX` backend: ``` class MyNVPTXBackend : public NVPTXBackend { public: void doFinalization(LLVMContext& Context, Module& M) { }" LLVM,NVPTX,69,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setModule(M); AP->setNVPTX(true); AP->setNVPTXAliases(false); AP->setNVPTXGlobalCtors(false); AP->setNVPTXGlobalDtors(false); AP->setNVPTXFileScopeInlineAssembly(false); AP->setTargetSpecificValue(Target::NVPTX, ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global ctor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global dtor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""Start of file scope inline assembly""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""End of file scope inline assembly""); }" LLVM,NVPTX,70,"void emitHeader(const NVPTX::NVCL &nvcl) { outs() << ""//\n"" << ""// Generated by LLVM NVPTX Back-End\n"" << ""//\n"" << ""\n"" << "".version "" << nvcl.getVersion() << ""."" << nvcl.getVersion() << ""\n"" << "".target "" << nvcl.getTarget() << "" "" << nvcl.getTarget() << "" "" << nvcl.getTarget() << ""::NVCL, texmode_independent\n"" << ""//, debug\n"" << "".address_size "" << nvcl.getAddressSize() << ""\n"" << ""\n""; }" LLVM,NVPTX,71,"void emitImplicitDef(Instruction *I, raw_ostream &OS) { if (I->getOpcode() == Instruction::IMPLICIT_DEF) { OS << ""implicit-def: "" << I->getOperand(0)->getName() << ""\n""; } }" LLVM,NVPTX,72," const TargetSpecificValue& NVPTX) { switch (Inst.getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Div: case Instruction::Rem: Context.advanceHazardState(NVPTX, NVPTX); break; case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::Not: Context.advanceHazardState(NVPTX, NVPTX); break; default: Context.advanceHazardState(NVPTX, NVPTX); break; } }" LLVM,NVPTX,73,"void print(const char *format, ...) 
{ va_list args; va_start(args, format); nvptx::LowerConstant(format, args); va_end(args); }" LLVM,NVPTX,74,void runOnMachineFunction(MachineFunction &MF) { const TargetTriple &TT = MF.getTargetTriple(); MF.emitFunctionBody(TT); MF.emitFunctionEpilogue(TT); } LLVM,NVPTX,75,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setTargetTriple(""nvptx-nvidia-cuda""); AP->setTargetData(new TargetData(128, 128, 8, 8)); AP->setTargetSpecificValue(""Missed a global variable"", 0); AP->setTargetSpecificValue(""Did not fully process a global variable"", 0); }" LLVM,NVPTX,76,"void emitHeader(LLVMContext &Context, NVPTXTargetMachine &TM) { std::string header = ""//\n// Generated by LLVM NVPTX Back-End\n//\n\n.version 10.10\n.target NVCL, texmode_independent CUDA, map_f64_to_f32, debug\n.address_size 64 32\n\n""; TM.emitHeader(header); }" LLVM,NVPTX,77, LLVM,NVPTX,78,"void emitGlobals(NVPTXTargetMachine *TM) { SmallVector Globals; for (auto &GV : TM->getModule()->globals()) { if (GV->hasExternalLinkage()) Globals.push_back(GV); } for (auto &GV : Globals) { uint64_t Address = TM->getGlobalAddress(GV); TM->emitGlobalVariable(GV, Address); } }" LLVM,NVPTX,79,"Value* getFunctionFrameSymbol(Function* F) { if (!F->hasStackFrame()) { return nullptr; } if (F->getStackFrame()->isRegisterBased()) { return nullptr; } StringRef symbolName = ""__func_pseudo_stack""; Type* symbolType = Type::getInt32Ty(F->getContext()); Value* symbol = new GlobalVariable(F->getParent(), symbolType, false, GlobalValue::ExternalLinkage, nullptr, symbolName); F->setPseudoStack(symbol); return symbol; }" LLVM,NVPTX,80,"void doFinalization(Module &M) override { if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); } if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); } if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); } if (M.getNamedMetadata(""llvm.dbg.cu"")) { M.getNamedMetadata(""llvm.dbg.cu"")->addOperand( M.getContext(), MDNode::get(M.getContext(), {MDString::get(M.getContext(), ""llvm.dbg.cu"")})); }" LLVM,NVPTX,81,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setModule(M); AP->setNVPTX(true); AP->setNVPTXAliases(false); AP->setNVPTXGlobalCtors(false); AP->setNVPTXGlobalDtors(false); AP->setNVPTXFileScopeInlineAssembly(false); AP->setTargetSpecificValue(Target::NVPTX, ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global ctor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global dtor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""Start of file scope inline assembly""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""End of file scope inline assembly""); }" LLVM,NVPTX,82,"define void @print(i32 %stream, i32 %value) #0 { %1 = call i32 @llvm.nvptx.print.i32(i32 %stream, i32 %value) ret void }" LLVM,NVPTX,83,"void doFinalization(NVPTX &NVPTX, NVPTX &NVPTX, NVPTX &NVPTX) { NVPTX.emitSection("".debug_loc"", """"); }" LLVM,NVPTX,84,"void print(const char 
*format, ...) { va_list args; va_start(args, format); nvptx::LowerConstant(format, args); va_end(args); }" LLVM,NVPTX,85,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setModule(M); AP->setNVPTX(true); AP->setNVPTXAliases(false); AP->setNVPTXGlobalCtors(false); AP->setNVPTXGlobalDtors(false); AP->setNVPTXFileScopeInlineAssembly(false); AP->setTargetSpecificValue(Target::NVPTX, ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global ctor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global dtor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""Start of file scope inline assembly""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""End of file scope inline assembly""); }" LLVM,NVPTX,86,"define void @print(i8* %stream, i8* %operand) #0 { %1 = call i8* @nvptx::LowerConstant(i8* %operand) call void @nvptx::Print(i8* %stream, i8* %1) ret void }" LLVM,NVPTX,87,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setTargetSpecificValue(NVPTX, NVPTX, NVPTX, NVPTX, NVPTX, ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(128, ""Start of file scope inline assembly""); AP->setTargetSpecificValue(128, ""End of file scope inline assembly""); }" LLVM,NVPTX,88,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX::CUDA); AP->setFileScopeInlineAssembly(true); AP->setFileScopeInlineAssemblyStart(""Start of file scope inline assembly""); AP->setFileScopeInlineAssemblyEnd(""End of file scope inline assembly""); M->setAsmPrinter(AP); }" LLVM,NVPTX,89,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(Target::NVPTX); AP->setModule(M); AP->setNVPTX(true); AP->setNVPTXAliases(false); AP->setNVPTXGlobalCtors(false); AP->setNVPTXGlobalDtors(false); AP->setNVPTXFileScopeInlineAssembly(false); AP->setTargetSpecificValue(Target::NVPTX, ""Module has aliases, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global ctor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, ""Module has a nontrivial global dtor, which NVPTX does not support.""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""Start of file scope inline assembly""); AP->setTargetSpecificValue(Target::NVPTX, 128, ""End of file scope inline assembly""); }" LLVM,NVPTX,90,"void doFinalization(LLVMContext& Context, Module& M) override { }" LLVM,NVPTX,91,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX); AP->setTargetData(NVPTX); AP->setTargetTriple(NVPTX); AP->setTargetArch(NVPTX); AP->setTargetABI(NVPTX); AP->setTargetAddressSpace(128); AP->setTargetInlineAssembly(""Start of file scope inline assembly""); AP->setTargetInlineAssembly(""End of file scope inline assembly""); }" LLVM,NVPTX,92,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX, NVPTX, 128, NVPTX::CUDA, 128); }" LLVM,NVPTX,93,"void doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX::CUDA); AP->setFileScopeInlineAssembly(true); AP->setFileScopeInlineAssemblyStart(""Start of file scope inline assembly""); AP->setFileScopeInlineAssemblyEnd(""End of file scope inline assembly""); M->setAsmPrinter(AP); }" LLVM,NVPTX,94,"void 
doInitialization(Module *M) { AsmPrinter *AP = new AsmPrinter(M); AP->setTarget(NVPTX, NVPTX, 128, NVPTX::CUDA); }" LLVM,NVPTX,95,"void doFinalization(LLVMContext& Context, NVPTXTargetMachine& TargetMachine) { }" LLVM,NVPTX,96, LLVM,NVPTX,97,"void emitGlobals(Module *M) { std::vector globals = M->getGlobalVariables(); for (auto global : globals) { Type *type = global->getType(); uint64_t size = type->getSize(); void *memory = malloc(size); GlobalAddress[global->getName()] = memory; if (global->hasInitializer()) { Constant *initializer = global->getInitializer(); emitConstant(initializer, memory); } } }" LLVM,NVPTX,98,"void emitHeader(const Module *M) { outs() << ""//\n"" << ""// Generated by LLVM NVPTX Back-End\n"" << ""//\n"" << ""\n"" << "".version "" << 10 << ""."" << 10 << ""\n"" << "".target "" << NVPTX << NVPTX << NVPTX::NVCL << "", texmode_independent"" << "", map_f64_to_f32"" << "", debug"" << ""\n"" << "".address_size "" << 64 << ""32"" << ""\n"" << ""\n""; }" LLVM,NVPTX,99,"void emitImplicitDef(Instruction *I, raw_ostream &OS) { if (I->getOpcode() == Instruction::IMPLICIT_DEF) { OS << ""implicit-def: "" << I->getOperand(0)->getName() << "" = "" << I->getOperand(1)->getName() << ""\n""; } }" LLVM,NVPTX,100,"void EmitInstruction(LLVMContext& Context, Instruction* Inst) { HazardState* Hazard = Inst->getHazardState(); switch (Inst->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Div: Hazard->advance(1); break; case Instruction::And: case Instruction::Or: case Instruction::Xor: break; case Instruction::Load: case Instruction::Store: Hazard->checkHazards(Inst); break; default: break; } }" LLVM,NVPTX,101,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,102, LLVM,NVPTX,103,"define void @print(i32 %stream, i32 %value) #0 { %1 = call i32 @llvm.nvptx.print.i32(i32 %stream, i32 %value) ret void }" LLVM,NVPTX,104," const TargetVariant *TV) { const MachineOperand &MO = MI->getOperand(OpNum); const MachineMemOperand *MMO = dyn_cast(MO); if (!MMO) return; const TargetAddressingMode *TAM = TV->getAddressingMode(MMO->getAddrMode()); if (!TAM) return; const char *RegName = TAM->getRegName(MMO->getReg()); if (!RegName) return; const char *OffsetName = TAM->getOffsetName(MMO->getOffset()); if (!OffsetName) return; const char *ScaleName = TAM->getScaleName(MMO->getScale()); if (!ScaleName) return; const char *SegmentName = TAM->getSegmentName(MMO->getSegment()); if (!SegmentName) return; const char *BaseName = TAM->getBaseName(MMO->getBase()); if (!BaseName) return; const char *IndexName = TAM->getIndexName(MMO->getIndex()); if (!IndexName) return; const char *DispName = TAM->getDispName(MMO->getDisp()); if (!DispName) return; Out << RegName << ""["" << OffsetName << ""+"" << ScaleName << ""*"" << BaseName << ""+"" << IndexName << ""+"" << DispName << ""]""; }" LLVM,NVPTX,105,"void PrintAsmOperand(const char *operand) { printf(""%s"", operand); }" LLVM,NVPTX,106,"void readLine(llvm::Value* stream) { llvm::Value* targetSpecificValue = llvm::Intrinsic::getDeclaration( stream->getContext(), llvm::Intrinsic::nvptx_readline); llvm::Value* result = llvm::Intrinsic::call( stream->getContext(), llvm::Intrinsic::nvptx_readline, {stream, targetSpecificValue}); return result; }" LLVM,NVPTX,107,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXTargetInfo &NVPTX = T.getNVPTXTargetInfo(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,NVPTX,108,"void runOnModule(Module 
&M) override { const char *triple = M.getTargetTriple(); if (triple == ""nvptx"" || triple == ""nvptx64"") { } }" LLVM,NVPTX,109,"void runOnModule(Module &M) override { const char *triple = M.getTargetTriple(); if (triple == ""nvptx"" || triple == ""nvptx64"") { } }" LLVM,NVPTX,110,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,111,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""NVPTX lower atomics of local memory""; } else { return P->getPassName(); } }" LLVM,NVPTX,112,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,113, LLVM,NVPTX,114, LLVM,NVPTX,115, LLVM,NVPTX,116,"void eliminateCallFramePseudoInstr(Function &F, TargetMachine &TM) { if (!TM.getTarget().hasFeature(""nvptx-call-frame-setup"")) return; for (auto &I : instructions(F)) { if (I.getOpcode() == Instruction::CallFrameSetup || I.getOpcode() == Instruction::CallFrameDestroy) { I.replaceAllUsesWith(UndefValue::get(I.getType())); I.eraseFromParent(); } } }" LLVM,NVPTX,117,"void emitEpilogue(Function *F, MachineFunction *MF) { const Target *T = MF->getTarget(); const TargetMachine *TM = MF->getTargetMachine(); BasicBlock *EpilogBB = MF->getEntryBlock(); Instruction *EpilogInst = EpilogBB->getTerminator(); BasicBlock *NewEpilogBB = BasicBlock::Create(MF->getContext(), ""epilog"", F); EpilogInst->moveBefore(NewEpilogBB->getFirstNonPHI()); Instruction *BranchInst = BranchInst::Create(NewEpilogBB, EpilogBB); BranchInst->setDebugLoc(EpilogInst->getDebugLoc()); F->setEntryBlock(NewEpilogBB); F->setExitBlock(EpilogBB); F->setEpilogBlock(NewEpilogBB); F->setEpilogInst(EpilogInst); F->setEpilogBranchInst(BranchInst); }" LLVM,NVPTX,118,"void emitPrologue(Function &F, const TargetMachine &TM) { const Target &T = TM.getTarget(); const NVPTXTarget &NVPTX = static_cast(T); const NVPTXSubtarget &ST = NVPTX.getSubtarget(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = &*BB->begin(); if (ST.hasFeature(NVPTX::Feature::ShrinkWrapping)) { } else { } if (ST.hasFeature(NVPTX::Feature::VRFrame)) { } else { } if (ST.hasFeature(NVPTX::Feature::MOV_DEPOT_ADDR)) { } else { }" LLVM,NVPTX,119, LLVM,NVPTX,120,"void eliminateCallFramePseudoInstr(Function &F, TargetMachine &TM) { if (!TM.getTarget().hasFeature(""nvptx-call-frame-setup"")) return; for (auto &I : instructions(F)) { if (I.getOpcode() == Instruction::CallFrameSetup || I.getOpcode() == Instruction::CallFrameDestroy) { I.replaceAllUsesWith(UndefValue::get(I.getType())); I.eraseFromParent(); } } }" LLVM,NVPTX,121, LLVM,NVPTX,122,"static void getDwarfFrameBase(const Function &F, const Module &M) { const DebugInfoMetadata *DIM = F.getDebugInfoMetadata(); if (!DIM) { return; } const DebugInfoMetadata *FrameBase = DIM->getFrameBase(); if (!FrameBase) { return; } unsigned RegNum = FrameBase->getRegNum(); int Offset = FrameBase->getOffset(); raw_ostream &OS = M.getContext().getStream(); OS << ""NVPTX,NVPTX,0""; OS << "" "" << RegNum; OS << "" "" << Offset; OS << ""\n""; }" LLVM,NVPTX,123, LLVM,NVPTX,124,"void emitPrologue(Function &F, const TargetMachine &TM) { const Target &T = TM.getTarget(); const NVPTXTarget &NVPTX = static_cast(T); const NVPTXSubtarget &ST = NVPTX.getSubtarget(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = &*BB->begin(); if (ST.hasFeature(NVPTX::Feature::ShrinkWrapping)) { } else { } if (ST.hasFeature(NVPTX::Feature::VRFrame)) { } else { } if 
(ST.hasFeature(NVPTX::Feature::MOV_DEPOT_ADDR)) { } else { }" LLVM,NVPTX,125, LLVM,NVPTX,126,"void emitPrologue(Function &F, const TargetMachine &TM) { const Target &T = TM.getTarget(); const NVPTXTarget &NVPTX = static_cast(T); const NVPTXSubtarget &ST = NVPTX.getSubtarget(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = &*BB->begin(); if (ST.hasFeature(NVPTX::Feature::ShrinkWrapping)) { } else { } if (ST.hasFeature(NVPTX::Feature::VRFrame)) { } else { } if (ST.hasFeature(NVPTX::Feature::MOV_DEPOT_ADDR)) { } else { }" LLVM,NVPTX,127,"void emitPrologue(Function &F, MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXSubtarget &ST = T.getSubtarget(); const NVPTXTargetMachine &TM = ST.getTargetMachine(); Register FramePtr = MF.getFramePointer(); Register StackPtr = MF.getStackPointer(); const MCInst *Prolog = MF.getProlog(); const MCInst *Epilog = MF.getEpilog(); if (Prolog) { for (const MCInst &Inst : *Prolog) { MF.emitInstruction(Inst); } } if (FramePtr && StackPtr) { MF.emitInstruction(TM.createFramePointerSetup(FramePtr, StackPtr)); MF.emitInstruction(TM.createStackPointerSetup(StackPtr, FramePtr)); } if (Epilog) { for (const MCInst &Inst : *Epilog) { MF.emitInstruction(Inst); } } }" LLVM,NVPTX,128,"void emitPrologue(Function &F, const TargetMachine &TM) { const Target &NVPTXTarget = TM.getTarget(); const TargetMachine::TargetSpecificValue *NVPTXTargetSpecificValues = NVPTXTarget.getTargetSpecificValues(); const Function::PrologueEpilogueCode *PrologueEpilogueCode = F.getPrologueEpilogueCode(); for (const auto &Inst : PrologueEpilogueCode->getPrologue()) { Inst->emit(F, TM); } for (const auto &Inst : PrologueEpilogueCode->getEpilogue()) { Inst->emit(F, TM); } }" LLVM,NVPTX,129,"void emitEpilogue(Function *F, MachineFunction *MF) { const Target *T = MF->getTarget(); const TargetMachine *TM = MF->getTargetMachine(); BasicBlock *EpilogBB = MF->getEntryBlock(); Instruction *EpilogInst = EpilogBB->getTerminator(); BasicBlock *NewEpilogBB = BasicBlock::Create(MF->getContext(), ""epilog"", F); EpilogInst->moveBefore(NewEpilogBB->getFirstNonPHI()); Instruction *BranchInst = BranchInst::Create(NewEpilogBB, EpilogBB); BranchInst->setDebugLoc(EpilogInst->getDebugLoc()); F->setEntryBlock(NewEpilogBB); F->setExitBlock(EpilogBB); F->setEpilogBlock(NewEpilogBB); F->setEpilogInst(EpilogInst); F->setEpilogBranchInst(BranchInst); }" LLVM,NVPTX,130,"void emitPrologue(Function &F, const TargetMachine &TM) { const Target &NVPTXTarget = TM.getTarget(); const TargetMachine::TargetSpecificValue *NVPTXTargetSpecificValues = NVPTXTarget.getTargetSpecificValues(); const Function::PrologueEpilogueCode *PrologueEpilogueCode = F.getPrologueEpilogueCode(); for (const auto &Inst : PrologueEpilogueCode->getPrologue()) { Inst->emit(F, TM); } for (const auto &Inst : PrologueEpilogueCode->getEpilogue()) { Inst->emit(F, TM); } }" LLVM,NVPTX,131, LLVM,NVPTX,132,"void eliminateCallFramePseudoInstr(Function &F, TargetMachine &TM) { if (!TM.getTarget().hasFeature(""nvptx-call-frame-setup"")) return; for (auto &I : instructions(F)) { if (I.getOpcode() == Instruction::CallFrameSetup || I.getOpcode() == Instruction::CallFrameDestroy) { I.replaceAllUsesWith(UndefValue::get(I.getType())); I.eraseFromParent(); } } }" LLVM,NVPTX,133,"void emitPrologue(Function &F, const TargetMachine &TM) { const NVPTXSubtarget &ST = TM.getSubtarget(); const NVPTXTargetMachine &NVTM = TM.getTargetMachine(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = BB->getFirstNonPHIOrDbgOrLifetime(); }" 
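The emitPrologue/emitEpilogue/eliminateCallFramePseudoInstr rows around this point (roughly 116-136) are only fragments: the real hooks live on a TargetFrameLowering subclass and take a MachineFunction plus MachineBasicBlock, not an IR Function and a TargetMachine. A minimal sketch of that shape follows; the class name and the empty bodies are illustrative, the constructor arguments and the eliminateCallFramePseudoInstr return type vary between LLVM versions, and the MOV_DEPOT_ADDR remark merely echoes the pseudo named in the neighbouring rows rather than asserting its exact operands.
```cpp
// Illustrative sketch of the frame-lowering hook shapes the fragments above
// aim at; not the upstream NVPTX implementation.
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetFrameLowering.h"

namespace llvm {

class NVPTXStyleFrameLowering : public TargetFrameLowering {
public:
  // PTX stacks grow up from the per-function local "depot".
  NVPTXStyleFrameLowering()
      : TargetFrameLowering(StackGrowsUp, Align(8), /*LocalAreaOffset=*/0) {}

  bool hasFP(const MachineFunction &MF) const override { return true; }

  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override {
    // There is no push/pop prologue in PTX; a frame register is only set up
    // when the function actually owns stack objects.
    if (!MF.getFrameInfo().hasStackObjects())
      return;
    // Real backend: emit a MOV_DEPOT_ADDR-style pseudo here that moves the
    // depot address into the virtual frame register used by frame indices.
  }

  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override {
    // Nothing to tear down for a depot-based stack.
  }

  MachineBasicBlock::iterator
  eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I) const override {
    // The call-frame setup/destroy pseudos carry no work on this target.
    return MBB.erase(I);
  }
};

} // namespace llvm
```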
LLVM,NVPTX,134,"void emitPrologue(Function &F, const TargetMachine &TM) { const NVPTXSubtarget &ST = TM.getSubtarget(); const NVPTXTargetMachine &NVTM = TM.getTargetMachine(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = BB->getFirstNonPHIOrDbgOrLifetime(); }" LLVM,NVPTX,135, LLVM,NVPTX,136,"void emitPrologue(Function &F, const TargetMachine &TM) { const NVPTXSubtarget &ST = TM.getSubtarget(); const NVPTXTargetMachine &NVTM = TM.getTargetMachine(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = BB->getFirstNonPHIOrDbgOrLifetime(); }" LLVM,NVPTX,137,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,138,"void runOnModule(Module &M) override { auto target = M.getTarget(); if (target != ""nvptx"") { return; } }" LLVM,NVPTX,139,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,140,void runOnFunction(Function &F) { TargetSpecificValue *NVPTX = F.getTargetSpecificValue(); if (NVPTX->isType(Intrinsic::nvvm_istypep_sampler) || NVPTX->isType(Intrinsic::nvvm_istypep_surface) || NVPTX->isType(Intrinsic::nvvm_istypep_texture)) { } } LLVM,NVPTX,141,void runOnFunction(Function &F) { auto &NVPTX = getAnalysis(); auto &Intrinsic = getAnalysis(); auto &nvvm_istypep_sampler = Intrinsic.getIntrinsic(Intrinsic::nvvm_istypep_sampler); auto &nvvm_istypep_surface = Intrinsic.getIntrinsic(Intrinsic::nvvm_istypep_surface); auto &nvvm_istypep_texture = Intrinsic.getIntrinsic(Intrinsic::nvvm_istypep_texture); for (auto &BB : F) { for (auto &I : BB) { if (auto *Call = dyn_cast(&I)) { if (Call->getCalledFunction() == nvvm_istypep_sampler) { } else if (Call->getCalledFunction() == nvvm_istypep_surface) { } else if (Call->getCalledFunction() == nvvm_istypep_texture) { } } } } } LLVM,NVPTX,142,void runOnFunction(Function &F) { TargetSpecificValue *NVPTX = F.getTargetSpecificValue(); if (NVPTX->isType(Intrinsic::nvvm_istypep_sampler) || NVPTX->isType(Intrinsic::nvvm_istypep_surface) || NVPTX->isType(Intrinsic::nvvm_istypep_texture)) { } } LLVM,NVPTX,143,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,144,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcodeName(); for (int i = 0; i < Inst->getNumOperands(); i++) { const MCOperand &Op = Inst->getOperand(i); if (Op.isReg()) { OS << "" %"" << Op.getReg(); } else if (Op.isImm()) { OS << "" $"" << Op.getImm(); } else if (Op.isMem()) { OS << "" ["" << Op.getMem().getBase() << "" + "" << Op.getMem().getDisp() << ""]""; } else { OS << "" ?""; } } if (Inst->getSuffix()) { OS << "" "" << Inst->getSuffix(); } if (Inst->getPredicate()) { OS << "" "" << Inst->getPredicate(); } if (Inst->getFlags()) { OS << "" "" << Inst->getFlags(); } if (Inst->getAddrMode()) { OS << "" "" << Inst->getAddrMode(); } if (Inst->getSize()) { OS << "" "" << Inst->getSize(); } if (Inst->getAlignment()) { OS << "" "" << Inst->getAlignment(); } if (Inst->getType()) { OS << "" "" << Inst->getType(); } if (Inst->getValue()) { OS << "" "" << Inst->getValue(); }" LLVM,NVPTX,145,"void printRegName(unsigned int reg) { switch (reg) { case NVPTX::NVPTX: printf(""NVPTX""); break; case NVPTX::NVPTX28: printf(""NVPTX28""); break; case NVPTX::BadVirtualRegisterEncoding: printf(""Bad virtual register encoding""); break; case NVPTX::0: printf(""%p""); break; case NVPTX::1: printf(""%rs""); break; case NVPTX::2: printf(""%r""); break; case NVPTX::3: printf(""%rd""); break; case NVPTX::4: 
printf(""%f""); break; case NVPTX::5: printf(""%fd""); break; case NVPTX::6: printf(""%h""); break; case NVPTX::7: printf(""%hh""); break; case NVPTX::0x0FFFFFFF: printf(""0x0FFFFFFF""); break; default: printf(""Unknown register""); break; } }" LLVM,NVPTX,146,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcodeName(); for (int i = 0; i < Inst->getNumOperands(); i++) { const MCOperand &Op = Inst->getOperand(i); if (Op.isReg()) { OS << "" %"" << Op.getReg(); } else if (Op.isImm()) { OS << "" $"" << Op.getImm(); } else if (Op.isMem()) { OS << "" ["" << Op.getMem().getBase() << "" + "" << Op.getMem().getDisp() << ""]""; } else { OS << "" ?""; } } if (Inst->getSuffix()) { OS << "" "" << Inst->getSuffix(); } if (Inst->getPredicate()) { OS << "" "" << Inst->getPredicate(); } if (Inst->getFlags()) { OS << "" "" << Inst->getFlags(); } if (Inst->getAddrMode()) { OS << "" "" << Inst->getAddrMode(); } if (Inst->getSize()) { OS << "" "" << Inst->getSize(); } if (Inst->getAlignment()) { OS << "" "" << Inst->getAlignment(); } if (Inst->getType()) { OS << "" "" << Inst->getType(); } if (Inst->getValue()) { OS << "" "" << Inst->getValue(); }" LLVM,NVPTX,147,"void printRegName(unsigned int reg) { switch (reg) { case NVPTX: printf(""NVPTX""); break; case NVPTX: printf(""NVPTX""); break; case 28: printf(""Bad virtual register encoding""); break; case 0: printf(""0""); break; case 1: printf(""%p""); break; case 2: printf(""%rs""); break; case 3: printf(""%r""); break; case 4: printf(""%rd""); break; case 5: printf(""%f""); break; case 6: printf(""%fd""); break; case 7: printf(""%h""); break; default: printf(""Unknown register""); break; } }" LLVM,NVPTX,148,"void printRegName(unsigned int reg) { switch (reg) { case 0: printf(""%%p""); break; case 1: printf(""%%rs""); break; case 2: printf(""%%r""); break; case 3: printf(""%%rd""); break; case 4: printf(""%%f""); break; case 5: printf(""%%fd""); break; default: printf(""Bad virtual register encoding""); break; } }" LLVM,NVPTX,149,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcodeName(); for (int i = 0; i < Inst->getNumOperands(); i++) { const MCOperand &Op = Inst->getOperand(i); if (Op.isReg()) { OS << "" %"" << Op.getReg(); } else if (Op.isImm()) { OS << "" $"" << Op.getImm(); } else if (Op.isMem()) { OS << "" ["" << Op.getMem().getBase() << "" + "" << Op.getMem().getDisp() << ""]""; } else { OS << "" ?""; } } if (Inst->getSuffix()) { OS << "" "" << Inst->getSuffix(); } if (Inst->getPredicate()) { OS << "" "" << Inst->getPredicate(); } if (Inst->getFlags()) { OS << "" "" << Inst->getFlags(); } if (Inst->getAddrMode()) { OS << "" "" << Inst->getAddrMode(); } if (Inst->getSize()) { OS << "" "" << Inst->getSize(); } if (Inst->getAlignment()) { OS << "" "" << Inst->getAlignment(); } if (Inst->getType()) { OS << "" "" << Inst->getType(); } if (Inst->getValue()) { OS << "" "" << Inst->getValue(); }" LLVM,NVPTX,150,"void printRegName(unsigned reg) { switch (reg) { case NVPTX::NVPTX_REG_P: printf(""%%p""); break; case NVPTX::NVPTX_REG_RS: printf(""%%rs""); break; case NVPTX::NVPTX_REG_R: printf(""%%r""); break; case NVPTX::NVPTX_REG_RL: printf(""%%rl""); break; case NVPTX::NVPTX_REG_F: printf(""%%f""); break; case NVPTX::NVPTX_REG_FL: printf(""%%fl""); break; default: printf(""Bad virtual register encoding""); break; } }" LLVM,NVPTX,151,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == 
Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,152, LLVM,NVPTX,153,"void copyPhysReg(int srcReg, int destReg, int width) { if (srcReg == destReg) { return; } if (!isPhysicalReg(srcReg)) { return; } if (!isPhysicalReg(destReg)) { return; } if (width != 16 && width != 32 && width != 64) { return; } if (getRegWidth(srcReg) != width || getRegWidth(destReg) != width) { return; } if (isFloatReg(srcReg)) { if (isFloatReg(destReg)) { if (width == 16) { FMOV16rr(srcReg, destReg); } else if (width == 32) { FMOV32rr(srcReg, destReg); } else if (width == 64) { FMOV64rr(srcReg, destReg); } } else { if (width == 16) { BITCONVERT_16_I2F(srcReg, destReg); } else if (width == 32) { BITCONVERT_32_I2F(srcReg, destReg); } else if (width == 64) { BITCONVERT_64_I2F(srcReg, destReg); } }" LLVM,NVPTX,154,"void copyPhysReg(unsigned int srcReg, unsigned int dstReg, unsigned int width) { if (srcReg >= NVPTX::Int1RegsRegClass || dstReg >= NVPTX::Int1RegsRegClass) { return; } if (width != 16 && width != 32 && width != 64) { return; } switch (width) { case 16: NVPTX::IMOV16rr(dstReg, srcReg); break; case 32: NVPTX::IMOV32rr(dstReg, srcReg); break; case 64: NVPTX::IMOV64rr(dstReg, srcReg); break; } }" LLVM,NVPTX,155," NVPTX::BranchCondition Condition) { if (&FallthroughMBB == &TargetMBB) { Condition = NVPTX::getOppositeCondition(Condition); } MBB.insert(new NVPTX::BranchInst(Condition, &TargetMBB, &FallthroughMBB)); }" LLVM,NVPTX,156, LLVM,NVPTX,157,"void copyPhysReg(unsigned int srcReg, unsigned int dstReg, unsigned int width) { if (srcReg >= NVPTX::Int1RegsRegClass || dstReg >= NVPTX::Int1RegsRegClass) { return; } if (width != 16 && width != 32 && width != 64) { return; } switch (width) { case 16: NVPTX::IMOV16rr(dstReg, srcReg); break; case 32: NVPTX::IMOV32rr(dstReg, srcReg); break; case 64: NVPTX::IMOV64rr(dstReg, srcReg); break; } }" LLVM,NVPTX,158," NVPTX::BranchCondition Condition) { if (&FallthroughMBB == &TargetMBB) { Condition = NVPTX::getOppositeCondition(Condition); } MBB.insert(new NVPTX::BranchInst(Condition, &TargetMBB, &FallthroughMBB)); }" LLVM,NVPTX,159,void removeBranch(MBB *MBB) { Instruction *LastInst = MBB->getLastInstruction(); if (LastInst->getOpcode() == NVPTX::GOTO || LastInst->getOpcode() == NVPTX::CBranch) { MBB->removeInstruction(LastInst); } } LLVM,NVPTX,160,"void copyPhysReg(unsigned int srcReg, unsigned int dstReg, unsigned int width) { if (srcReg >= NVPTX::Int1RegsRegClass || dstReg >= NVPTX::Int1RegsRegClass) { return; } if (width != 16 && width != 32 && width != 64) { return; } switch (width) { case 16: NVPTX::IMOV16rr(dstReg, srcReg); break; case 32: NVPTX::IMOV32rr(dstReg, srcReg); break; case 64: NVPTX::IMOV64rr(dstReg, srcReg); break; } }" LLVM,NVPTX,161,"void copyPhysReg(unsigned int srcReg, unsigned int dstReg, unsigned int width) { if (srcReg >= NVPTX::Int1RegsRegClass || dstReg >= NVPTX::Int1RegsRegClass) { return; } if (width != 16 && width != 32 && width != 64) { return; } switch (width) { case 16: NVPTX::IMOV16rr(dstReg, srcReg); break; case 32: NVPTX::IMOV32rr(dstReg, srcReg); break; case 64: NVPTX::IMOV64rr(dstReg, srcReg); break; } }" LLVM,NVPTX,162, LLVM,NVPTX,163,"void copyPhysReg(unsigned int srcReg, unsigned int dstReg, unsigned int width) { if (srcReg >= NVPTX::Int1RegsRegClass || dstReg >= NVPTX::Int1RegsRegClass) { return; } if (width != 16 && width != 32 && width != 64) { return; } switch (width) { case 16: NVPTX::IMOV16rr(dstReg, srcReg); break; case 32: NVPTX::IMOV32rr(dstReg, srcReg); break; case 64: NVPTX::IMOV64rr(dstReg, srcReg); break; } }" 
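The copyPhysReg rows immediately before and after this point key the move opcode off a register width or class but route it through invented helpers; the hook is conventionally a TargetInstrInfo method that picks a class-typed register-to-register move and emits it with BuildMI. The sketch below is written as a free function for self-containedness (the helper name is mine), and it assumes the Int*/Float* register classes and IMOV*rr/FMOV*rr opcodes that the surrounding rows already name; it is not a verbatim copy of the upstream hook.
```cpp
// Sketch only: assumes the NVPTX register classes and IMOV/FMOV move opcodes
// named in the surrounding rows match the backend's TableGen definitions.
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

static void copyPhysRegSketch(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc, const TargetInstrInfo &TII) {
  // On NVPTX the "physical" registers handed to this hook are class-typed,
  // so the destination's register class selects the move instruction.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);

  unsigned Op;
  if (DestRC == &NVPTX::Int1RegsRegClass)
    Op = NVPTX::IMOV1rr;
  else if (DestRC == &NVPTX::Int16RegsRegClass)
    Op = NVPTX::IMOV16rr;
  else if (DestRC == &NVPTX::Int32RegsRegClass)
    Op = NVPTX::IMOV32rr;
  else if (DestRC == &NVPTX::Int64RegsRegClass)
    Op = NVPTX::IMOV64rr;
  else if (DestRC == &NVPTX::Float32RegsRegClass)
    Op = NVPTX::FMOV32rr;
  else if (DestRC == &NVPTX::Float64RegsRegClass)
    Op = NVPTX::FMOV64rr;
  else
    llvm_unreachable("unsupported register class in copyPhysReg");

  // Emit the register-to-register move, propagating the kill flag.
  BuildMI(MBB, I, DL, TII.get(Op), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
}
```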
LLVM,NVPTX,164,"void copyPhysReg(PhysReg *src, PhysReg *dst) { if (src == dst) { return; } if (!isPhysReg(src)) { return; } if (!isPhysReg(dst)) { return; } if (getRegClass(src) != getRegClass(dst)) { return; } switch (getRegClass(src)) { case NVPTX::Int32RegsRegClass: dst->setInt32(src->getInt32()); break; case NVPTX::Int8RegsRegClass: dst->setInt8(src->getInt8()); break; case NVPTX::Int1RegsRegClass: dst->setInt1(src->getInt1()); break; case NVPTX::Float32RegsRegClass: dst->setFloat32(src->getFloat32()); break; case NVPTX::Int16RegsRegClass: dst->setInt16(src->getInt16()); break; case NVPTX::Int64RegsRegClass: dst->setInt64(src->getInt64()); break; case NVPTX::Float64RegsRegClass: dst->setFloat64(src->getFloat64()); break; default: llvm_unreachable(""Don't know how to copy a register""); } }" LLVM,NVPTX,165,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,166,"const char* getPassName(const char* passName) { if (passName == ""NVPTX"") { return ""NVPTX DAG->DAG Pattern Instruction Selection""; } else { return passName; } }" LLVM,NVPTX,167,"void runOnMachineFunction(MachineFunction &MF) { const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); MF.emitFunctionBody(); MF.emitInstruction(new NVPTXInstruction(NVPTXInstruction::NVPTX_ADD, MF.getRegisterOperand(0), MF.getRegisterOperand(1), MF.getRegisterOperand(2))); MF.emitInstruction(new NVPTXInstruction(NVPTXInstruction::NVPTX_MUL, MF.getRegisterOperand(0), MF.getRegisterOperand(1), MF.getRegisterOperand(2))); MF.emitInstruction(new NVPTXInstruction(NVPTXInstruction::NVPTX_DIV, MF.getRegisterOperand(0), MF.getRegisterOperand(1), MF.getRegisterOperand(2))); }" LLVM,NVPTX,168,"const char* getPassName(const char* passName) { if (passName == ""NVPTX"") { return ""NVPTX DAG->DAG Pattern Instruction Selection""; } else { return passName; } }" LLVM,NVPTX,169,"void runOnMachineFunction(MachineFunction &MF) { const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); MF.emitFunctionBody(); MF.emitInstruction(new NVPTXInstruction(NVPTXInstruction::NVPTX_ADD, MF.getRegisterOperand(0), MF.getRegisterOperand(1), MF.getRegisterOperand(2))); MF.emitInstruction(new NVPTXInstruction(NVPTXInstruction::NVPTX_MUL, MF.getRegisterOperand(0), MF.getRegisterOperand(1), MF.getRegisterOperand(2))); MF.emitInstruction(new NVPTXInstruction(NVPTXInstruction::NVPTX_DIV, MF.getRegisterOperand(0), MF.getRegisterOperand(1), MF.getRegisterOperand(2))); }" LLVM,NVPTX,170," const TargetMachine& TM) { const AddressingMode& AM = TM.getAddressingMode(MO.getConstraint()); if (!AM.isSupported()) { IA.setError(""Unsupported addressing mode for constraint '"" + MO.getConstraint() + ""'""); return; } const TargetValue& TV = TM.getTargetValue(AM); if (!TV.isSupported()) { IA.setError(""Unsupported target-specific value for addressing mode '"" + AM.getName() + ""'""); return; } MO.setTargetValue(TV); }" LLVM,NVPTX,171,"void Select(Module &M, IRBuilder<> &Builder, Value *V) { if (V->getType()->isPointerTy() && V->getType()->getPointerElementType()->isIntegerTy(32)) { if (V->getType()->getPointerElementType()->getIntegerBitWidth() == 32) { Builder.CreateLoad(V, ""load_v2""); } else if 
(V->getType()->getPointerElementType()->getIntegerBitWidth() == 64) { Builder.CreateLoad(V, ""load_v4""); } else if (V->getType()->getPointerElementType()->getIntegerBitWidth() == 128) { Builder.CreateLoad(V, ""ldg_v2""); } else if (V->getType()->getPointerElementType()->getIntegerBitWidth() == 256) { Builder.CreateLoad(V, ""ldg_v4""); } else if (V->getType()->getPointerElementType()->getIntegerBitWidth() == 512) { Builder.CreateLoad(V, ""ldu_v2""); }" LLVM,NVPTX,172," const TargetMachine& TM) { const AddressingMode& AM = TM.getAddressingMode(MO.getConstraint()); if (!AM.isSupported()) { IA.setError(""Unsupported addressing mode for constraint '"" + MO.getConstraint() + ""'""); return; } const TargetValue& TV = TM.getTargetValue(AM); if (!TV.isSupported()) { IA.setError(""Unsupported target-specific value for addressing mode '"" + AM.getName() + ""'""); return; } MO.setTargetValue(TV); }" LLVM,NVPTX,173, LLVM,NVPTX,174," const TargetMachine& TM) { const AddressingMode& AM = TM.getAddressingMode(MO.getConstraint()); if (!AM.isSupported()) { IA.setError(""Unsupported addressing mode for constraint '"" + MO.getConstraint() + ""'""); return; } const TargetValue& TV = TM.getTargetValue(AM); if (!TV.isSupported()) { IA.setError(""Unsupported target-specific value for addressing mode '"" + AM.getName() + ""'""); return; } MO.setTargetValue(TV); }" LLVM,NVPTX,175," const TargetTransformInfo::TargetTransformInfo &TTI) { const TargetTransformInfo::TargetTransformInfo &NVPTXTTI = TTI.getTargetTransformInfo(TargetTransformInfo::NVPTX); const TargetTransformInfo::TargetTransformInfo &NVPTXISDTTI = TTI.getTargetTransformInfo(TargetTransformInfo::NVPTX_ISD); const MachineInstr &CurrentMI = MBB.getInstruction(MI.getIndex()); const MachineOperand &CurrentMO = CurrentMI.getOperand(0); if (CurrentMI.getOpcode() == TargetOpcode::ISD_LOAD || CurrentMI.getOpcode() == TargetOpcode::ISD_STORE) { const TargetTransformInfo::TargetTransformInfo &LoadStoreTTI = TTI.getTargetTransformInfo(TargetTransformInfo::NVPTX_ISD_LOAD_STORE); if (LoadStoreTTI.canTransform(CurrentMI)) { LoadStoreTTI.transform(CurrentMI, CurrentMO); } } }" LLVM,NVPTX,176,"const char* getPassName(const char* passName) { if (passName == ""NVPTX"") { return ""NVPTX DAG->DAG Pattern Instruction Selection""; } else { return passName; } }" LLVM,NVPTX,177, LLVM,NVPTX,178," const TargetMachine& TM) { const AddressingMode& AM = TM.getAddressingMode(MO.getConstraint()); if (!AM.isSupported()) { IA.setError(""Unsupported addressing mode for constraint '"" + MO.getConstraint() + ""'""); return; } const TargetValue& TV = TM.getTargetValue(AM); if (!TV.isSupported()) { IA.setError(""Unsupported target-specific value for addressing mode '"" + AM.getName() + ""'""); return; } MO.setTargetValue(TV); }" LLVM,NVPTX,179, LLVM,NVPTX,180, SmallVectorImpl &Actions) { if (Ty->isIntegerTy(1)) { Actions.push_back(Intrinsic::nvptx_bitwise_and); Actions.push_back(Intrinsic::nvptx_bitwise_or); Actions.push_back(Intrinsic::nvptx_bitwise_xor); } else if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(16)) { Actions.push_back(Intrinsic::nvptx_vector_bitwise_and); Actions.push_back(Intrinsic::nvptx_vector_bitwise_or); Actions.push_back(Intrinsic::nvptx_vector_bitwise_xor); } } LLVM,NVPTX,181,"Value* getSqrtEstimate(Value* V, Intrinsic::ID ID) { switch (ID) { case Intrinsic::nvvm_rsqrt_approx_ftz_f: case Intrinsic::nvvm_rsqrt_approx_f: return Intrinsic::nvvm_rsqrt_approx_f(V); case Intrinsic::nvvm_rsqrt_approx_d: return Intrinsic::nvvm_rsqrt_approx_d(V); case 
Intrinsic::nvvm_sqrt_approx_ftz_f: case Intrinsic::nvvm_sqrt_approx_f: return Intrinsic::nvvm_sqrt_approx_f(V); case Intrinsic::nvvm_rcp_approx_ftz_d: return Intrinsic::nvvm_rcp_approx_ftz_d(V); default: return nullptr; } }" LLVM,NVPTX,182, LLVM,NVPTX,183,"void PerformDAGCombine(DAGNode *N, DAGNode *Target) { if (Target->isTargetIndependent()) { return; } if (Target->isTargetSpecific()) { if (Target->getTarget() == NVPTX) { if (Target->getOpcode() == ISD::ADD || Target->getOpcode() == ISD::FADD || Target->getOpcode() == ISD::MUL || Target->getOpcode() == ISD::SHL || Target->getOpcode() == ISD::AND || Target->getOpcode() == ISD::UREM || Target->getOpcode() == ISD::SREM || Target->getOpcode() == ISD::SETCC) { CombineNVPTXNode(N, Target); } } } }" LLVM,NVPTX,184, SmallVectorImpl &Actions) { if (Ty->isIntegerTy(1)) { Actions.push_back(Intrinsic::nvptx_bitwise_and); Actions.push_back(Intrinsic::nvptx_bitwise_or); Actions.push_back(Intrinsic::nvptx_bitwise_xor); } else if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(16)) { Actions.push_back(Intrinsic::nvptx_vector_bitwise_and); Actions.push_back(Intrinsic::nvptx_vector_bitwise_or); Actions.push_back(Intrinsic::nvptx_vector_bitwise_xor); } } LLVM,NVPTX,185,"bool isLegalAddressingMode(const Target& T, const AddressingMode& AM, const Type& Ty) { if (T.getTargetID() == NVPTX) { if (AM.getAddressingMode() == AddressingMode::NVPTX) { if (Ty.getTypeID() == Type::NVPTX) { return true; } } } return false; }" LLVM,NVPTX,186, LLVM,NVPTX,187, LLVM,NVPTX,188,"void getPreferredVectorAction(Type *Ty, Value *V, Instruction *I) { if (Ty->isVectorTy()) { if (Ty->getVectorNumElements() == 1) { if (Ty->getVectorElementType()->isIntegerTy(1)) { I->setPreferredLegalizationStrategy(LegalizeTypes::MVT::i1); } } } }" LLVM,NVPTX,189, LLVM,NVPTX,190,"bool isLegalAddressingMode(const Target& T, const AddressingMode& AM, const Type& Ty) { if (T.getTargetID() == NVPTX) { if (AM.getAddressingMode() == AddressingMode::NVPTX) { if (Ty.getTypeID() == Type::NVPTX) { return true; } } } return false; }" LLVM,NVPTX,191," const Target &Target) { if (Operand->isRegister()) { Ops.push_back(new AsmOperand(Operand->getRegister(), Operand->getType(), Operand->getConstraint())); return; } if (Operand->isMemoryReference()) { Ops.push_back(new AsmOperand(Operand->getMemoryReference(), Operand->getType(), Operand->getConstraint())); return; } if (Operand->isConstant()) { Ops.push_back(new AsmOperand(Operand->getConstant(), Operand->getType(), Operand->getConstraint())); return; } Ops.push_back(new AsmOperand(Operand->getLabel(), Operand->getType(), Operand->getConstraint())); }" LLVM,NVPTX,192,"void LowerOperation(LLVMContext &Context, ISD::Node *Node) { ISD::OperationType OpType = Node->getOperationType(); if (OpType == ISD::RETURNADDR || OpType == ISD::FRAMEADDR) { ISD::Node *Address = Node->getOperand(0); ISD::Node *NVPTXInst = new ISD::Node(ISD::NVPTX_RETURNADDR, Address); Node->replaceAllUsesWith(NVPTXInst); return; } if (OpType == ISD::GlobalAddress) { ISD::Node *Address = Node->getOperand(0); ISD::Node *NVPTXInst = new ISD::Node(ISD::NVPTX_GLOBALADDRESS, Address); Node->replaceAllUsesWith(NVPTXInst); return; } if (OpType == ISD::INTRINSIC_W_CHAIN) { ISD::Node *Intrinsic = Node->getOperand(0); ISD::Node *Chain = Node->getOperand(1); ISD::Node *NVPTXInst = new ISD::Node(ISD::NVPTX_INTRINSIC_W_CHAIN, Intrinsic, Chain); Node->replaceAllUsesWith(NVPTXInst); return; }" LLVM,NVPTX,193,void PerformDAGCombine(DAGNode *N) { TargetSpecificValue *TSValue = 
N->getTargetSpecificValue(); if (TSValue == nullptr) { return; } if (TSValue->getKind() == TargetSpecificValue::Kind::NVPTX) { NVPTXTargetSpecificValue *NVPTXTSValue = static_cast(TSValue); switch (NVPTXTSValue->getOpcode()) { case ISD::ADD: break; case ISD::FADD: break; case ISD::MUL: break; case ISD::SHL: break; case ISD::AND: break; case ISD::SELECT: break; default: return; } } } LLVM,NVPTX,194,"void ReplaceNodeResults(LLVMContext &Context, ISD::Node *Node) { if (Node->getResultType()->isIllegal()) { ISD::Node *NewNode = nullptr; switch (Node->getOpcode()) { case ISD::LOAD: NewNode = createCustomLoad(Context, Node); break; case ISD::INTRINSIC_W_CHAIN: NewNode = createCustomIntrinsic(Context, Node); break; default: NewNode = createCustomNode(Context, Node); break; } Node->replaceAllUsesWith(NewNode); Node->eraseFromParent(); } } ISD::Node *createCustomLoad(LLVMContext &Context, ISD::Node *Node) { ISD::Node *NewNode = ISD::Node::create(Context, ISD::LOAD, Node->getOperand(0), Node->getOperand(1)); NewNode->setResultType(Node->getResultType()); return NewNode; }" LLVM,NVPTX,195," const Instruction *FMul, const Instruction *FAdd) { if (FMA->getOpcode() != Instruction::FMA) return false; if (FMul->getOpcode() != Instruction::FMUL || FAdd->getOpcode() != Instruction::FADD) return false; if (FMA->getLatency() > FMul->getLatency() + FAdd->getLatency()) return true; if (FMA->getThroughput() > FMul->getThroughput() + FAdd->getThroughput()) return true; return false; }" LLVM,NVPTX,196,MVT getSetCCResultType(MVT::Type type) { switch (type) { case MVT::i1: return MVT::i1; case MVT::i8: return MVT::i8; case MVT::i16: return MVT::i16; case MVT::i32: return MVT::i32; case MVT::i64: return MVT::i64; case MVT::f32: return MVT::f32; case MVT::f64: return MVT::f64; default: return MVT::i1; } } LLVM,NVPTX,197,Type* getShiftAmountTy(const Instruction* I) { if (!I->isShiftOpcode()) { return nullptr; } Type* shiftAmountTy = I->getOperand(1)->getType(); if (shiftAmountTy->isConstant()) { return shiftAmountTy; } if (shiftAmountTy->isVariable()) { return shiftAmountTy; } if (shiftAmountTy->isPointer()) { return shiftAmountTy->getPointerElementType(); } if (shiftAmountTy->isVector()) { return shiftAmountTy->getVectorElementType(); } if (shiftAmountTy->isStruct()) { return shiftAmountTy->getStructElementType(0); } if (shiftAmountTy->isArray()) { return shiftAmountTy->getArrayElementType(); } if (shiftAmountTy->isFunction()) { return shiftAmountTy->getFunctionReturnType(); } LLVM,NVPTX,198, LLVM,NVPTX,199,"static InlineAsmRegClass getRegForInlineAsmConstraint(const InlineAsmConstraint &Constraint) { StringRef TargetSpecificValue = Constraint.getTargetSpecificValue(); if (TargetSpecificValue.startsWith(""NVPTX::"")) { StringRef RegClass = TargetSpecificValue.substr(6); if (RegClass == ""Int8RegsRegClass"" || RegClass == ""Int16RegsRegClass"" || RegClass == ""Int32RegsRegClass"" || RegClass == ""Int64RegsRegClass"" || RegClass == ""Float32RegsRegClass"" || RegClass == ""Float64RegsRegClass"") { return InlineAsmRegClass::get(RegClass); } } return InlineAsmRegClass::getEmpty(); }" LLVM,NVPTX,200, LLVM,NVPTX,201,"bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::nvvm_atomic_load_add_f32: case Intrinsic::nvvm_atomic_load_inc_32: case Intrinsic::nvvm_atomic_load_dec_32: case Intrinsic::nvvm_ldu_global_i: case Intrinsic::nvvm_ldu_global_f: case Intrinsic::nvvm_ldu_global_p: return true; default: return false; } } ``` This function uses a `switch` statement to check the `Intrinsic::ID` of the given 
intrinsic and return `true` if it is one of the intrinsics that touches memory, and `false` otherwise. You can use this function in your code like this: ``` if (getTgtMemIntrinsic(intrinsic)) { } else { }" LLVM,NVPTX,202," const Target &Target) { if (Operand->isRegister()) { Ops.push_back(new AsmOperand(Operand->getRegister(), Operand->getType(), Operand->getConstraint())); return; } if (Operand->isMemoryReference()) { Ops.push_back(new AsmOperand(Operand->getMemoryReference(), Operand->getType(), Operand->getConstraint())); return; } if (Operand->isConstant()) { Ops.push_back(new AsmOperand(Operand->getConstant(), Operand->getType(), Operand->getConstraint())); return; } Ops.push_back(new AsmOperand(Operand->getLabel(), Operand->getType(), Operand->getConstraint())); }" LLVM,NVPTX,203, LLVM,NVPTX,204," unsigned W, unsigned X, unsigned Y, unsigned Z) { Value *RetVal = Outs[0]; DAG *DAG = Context.getDAG(); DAG->addNode(DAG->createNode(ISD::EXTRACT_VECTOR_ELT, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::StoreRetval, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::MoveToRetval, RetVal, 0)); DAG->setTargetSpecificValue(MVT::Other, MVT::i32, 8, 8, NVPTXISD::RET_FLAG); }" LLVM,NVPTX,205, SmallVectorImpl &Actions) { if (Ty->isIntegerTy(1)) { Actions.push_back(Intrinsic::nvptx_bitwise_and); Actions.push_back(Intrinsic::nvptx_bitwise_or); Actions.push_back(Intrinsic::nvptx_bitwise_xor); } else if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(16)) { Actions.push_back(Intrinsic::nvptx_vector_bitwise_and); Actions.push_back(Intrinsic::nvptx_vector_bitwise_or); Actions.push_back(Intrinsic::nvptx_vector_bitwise_xor); } } LLVM,NVPTX,206," const Instruction *FMul, const Instruction *FAdd) { if (FMA->getOpcode() != Instruction::FMA) return false; if (FMul->getOpcode() != Instruction::FMUL || FAdd->getOpcode() != Instruction::FADD) return false; if (FMA->getLatency() > FMul->getLatency() + FAdd->getLatency()) return true; if (FMA->getThroughput() > FMul->getThroughput() + FAdd->getThroughput()) return true; return false; }" LLVM,NVPTX,207, LLVM,NVPTX,208, LLVM,NVPTX,209,"Type* getScalarShiftAmountTy(Type* ShiftedTy, Type* AmountTy) { if (ShiftedTy->isIntegerTy() && AmountTy->isIntegerTy()) { return ShiftedTy; } else if (ShiftedTy->isPointerTy() && AmountTy->isIntegerTy()) { return ShiftedTy->getPointerElementType(); } else { return nullptr; } }" LLVM,NVPTX,210,"MVT getSetCCResultType(MVT::Type type) { switch (type) { case MVT::i1: return MVT::i1; case MVT::i8: return MVT::i8; case MVT::i16: return MVT::i16; case MVT::i32: return MVT::i32; case MVT::i64: return MVT::i64; case MVT::f32: return MVT::f32; case MVT::f64: return MVT::f64; default: llvm_unreachable(""Invalid type for SETCC""); } }" LLVM,NVPTX,211,"bool isLegalAddressingMode(const Target& T, const AddressingMode& AM, const Type& Ty) { if (T.getTargetID() == NVPTX) { if (AM.getAddressingMode() == AddressingMode::NVPTX) { if (Ty.getTypeID() == Type::NVPTX) { return true; } } } return false; }" LLVM,NVPTX,212,"void LowerOperation(LLVMContext &Context, const Instruction *Inst) { const TargetSpecificValue *TSV = Inst->getTargetSpecificValue(); if (TSV && TSV->isCustomLowering()) { const CustomLoweringHook *Hook = TSV->getCustomLoweringHook(); Hook->invoke(Context, Inst); } else { Inst->lower(Context); } } ``` This function takes an `LLVMContext` and an `Instruction` as input, and it uses the `getTargetSpecificValue` method to get the target specific value for the operation. 
If the operation has a custom lowering hook, it invokes the hook using the `invoke` method. Otherwise, it uses the default lowering. Note that the `CustomLoweringHook` class is not part of the LLVM API, so you will need to define it yourself. Here's an example implementation: ``` class CustomLoweringHook { public: virtual void invoke(LLVMContext &Context, const Instruction *Inst) = 0; }; ``` You can then define custom lowering hooks for each operation that requires a custom lowering. For example: ``` class ReturnAddrLoweringHook : public CustomLoweringHook { public: void invoke(LLVMContext &Context, const Instruction *Inst) { Inst->lower(Context); } }; class FrameAddrLoweringHook : public CustomLoweringHook { public: void invoke(LLVMContext &Context, const Instruction *Inst) { Inst->lower(Context); }" LLVM,NVPTX,213,"void PerformDAGCombine(DAGNode *Node, DAGNode *Parent, DAGNode *Child) { if (Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::FADD || Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL || Node->getOpcode() == ISD::AND) { if (Parent->getOpcode() == ISD::ADD || Parent->getOpcode() == ISD::FADD || Parent->getOpcode() == ISD::MUL || Parent->getOpcode() == ISD::SHL || Parent->getOpcode() == ISD::AND) { if (Child->getOpcode() == ISD::ADD || Child->getOpcode() == ISD::FADD || Child->getOpcode() == ISD::MUL || Child->getOpcode() == ISD::SHL || Child->getOpcode() == ISD::AND) { } } } }" LLVM,NVPTX,214,"void PerformDAGCombine(DAGNode *N) { TargetSpecificValue *TSVal = N->getTargetSpecificValue(); if (TSVal) { Value *Val = N->getValue(); Value *CombinedVal = CombineValues(Val, TSVal); N->setValue(CombinedVal); } }" LLVM,NVPTX,215,"bool shouldCastAtomicLoadInIR(LLVMContext &Context, LoadInst *Load) { if (!Load->isAtomic()) { return false; } if (Load->getAtomicOrdering() != AtomicOrdering::NVPTX) { return false; } if (!Load->getPointerOperand()->getType()->isPointerTy()) { return false; } if (!Load->getPointerOperand()->getType()->getPointerElementType()->isNVPTX()) { return false; } if (Load->getAtomicOrdering() != AtomicOrdering::NVPTX) { return false; } if (Load->getMemoryOrdering() != MemoryOrdering::NVPTX) { return false; } if (Load->getSynchronizationScope() != SynchronizationScope::NVPTX) { return false; }" LLVM,NVPTX,216,bool shouldCastAtomicStoreInIR(AtomicStoreInst *ASI) { if (ASI->getAtomicOrdering() == AtomicOrdering::NVPTX) { if (ASI->getPointerOperand()->getType()->isPointerTy()) { if (ASI->getPointerOperand()->getType()->getPointerElementType()->isNVPTXPointerTy()) { return true; } } } return false; } LLVM,NVPTX,217, LLVM,NVPTX,218, LLVM,NVPTX,219,bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::nvvm_atomic_load_add_f32: case Intrinsic::nvvm_atomic_load_inc_32: case Intrinsic::nvvm_atomic_load_dec_32: case Intrinsic::nvvm_ldu_global_i: case Intrinsic::nvvm_ldu_global_f: case Intrinsic::nvvm_ldu_global_p: return true; default: return false; } } LLVM,NVPTX,220, LLVM,NVPTX,221," SmallVectorImpl &Outs) { SDValue RetVal = Chain.getOperand(0); if (RetVal.getNode()->getOpcode() == ISD::RET_FLAG) { if (RetVal.getNode()->getType() == MVT::i32) { RetVal = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, RetVal); } else { RetVal = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, RetVal); }" LLVM,NVPTX,222,MVT getSetCCResultType(MVT::Type type) { switch (type) { case MVT::i1: return MVT::i1; case MVT::i8: return MVT::i8; case MVT::i16: return MVT::i16; case MVT::i32: return MVT::i32; case MVT::i64: return MVT::i64; case MVT::f32: return MVT::f32; case 
MVT::f64: return MVT::f64; default: return MVT::i1; } } LLVM,NVPTX,223,"Type* getScalarShiftAmountTy(Type* ShiftedTy, Type* AmountTy) { if (ShiftedTy->isIntegerTy() && AmountTy->isIntegerTy()) { return ShiftedTy; } else if (ShiftedTy->isPointerTy() && AmountTy->isIntegerTy()) { return ShiftedTy->getPointerElementType(); } else { return nullptr; } }" LLVM,NVPTX,224,MVT getSetCCResultType(MVT::Type type) { switch (type) { case MVT::i1: return MVT::i1; case MVT::i8: return MVT::i8; case MVT::i16: return MVT::i16; case MVT::i32: return MVT::i32; case MVT::i64: return MVT::i64; case MVT::f32: return MVT::f32; case MVT::f64: return MVT::f64; default: return MVT::i1; } } LLVM,NVPTX,225,"void PerformDAGCombine(DAGNode *Node, DAGNode *Parent, DAGNode *Child) { if (Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::FADD || Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL || Node->getOpcode() == ISD::AND) { if (Parent->getOpcode() == ISD::ADD || Parent->getOpcode() == ISD::FADD || Parent->getOpcode() == ISD::MUL || Parent->getOpcode() == ISD::SHL || Parent->getOpcode() == ISD::AND) { if (Child->getOpcode() == ISD::ADD || Child->getOpcode() == ISD::FADD || Child->getOpcode() == ISD::MUL || Child->getOpcode() == ISD::SHL || Child->getOpcode() == ISD::AND) { } } } }" LLVM,NVPTX,226,"void ReplaceNodeResults(LLVMContext &Context, ISD::Node *Node) { if (Node->getResultType()->isIllegal()) { ISD::Node *NewNode = nullptr; switch (Node->getOpcode()) { case ISD::LOAD: NewNode = createCustomLoad(Context, Node); break; case ISD::INTRINSIC_W_CHAIN: NewNode = createCustomIntrinsic(Context, Node); break; default: NewNode = createCustomNode(Context, Node); break; } Node->replaceAllUsesWith(NewNode); Node->eraseFromParent(); } } ISD::Node *createCustomLoad(LLVMContext &Context, ISD::Node *Node) { ISD::Node *NewNode = ISD::Node::create(Context, ISD::LOAD, Node->getOperand(0), Node->getOperand(1)); NewNode->setResultType(Node->getResultType()); return NewNode; }" LLVM,NVPTX,227, LLVM,NVPTX,228,ValueType getSetCCResultType(const Instruction *Inst) { if (Inst->getOpcode() == Instruction::SETCC) { return MVT::i1; } return Inst->getType(); } LLVM,NVPTX,229," unsigned W, unsigned X, unsigned Y, unsigned Z) { Value *RetVal = Outs[0]; DAG *DAG = Context.getDAG(); DAG->addNode(DAG->createNode(ISD::EXTRACT_VECTOR_ELT, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::StoreRetval, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::MoveToRetval, RetVal, 0)); DAG->setTargetSpecificValue(MVT::Other, MVT::i32, 8, 8, NVPTXISD::RET_FLAG); }" LLVM,NVPTX,230,void PerformDAGCombine(DAGNode *N) { TargetSpecificValue *TSVal = N->getTargetSpecificValue(); if (TSVal->getKind() == TargetSpecificValue::Kind::TargetIndependent) { return; } LLVM,NVPTX,231,"void PerformDAGCombine(DAGNode *Node, DAGNode *Parent, DAGNode *Child) { if (Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::FADD || Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL || Node->getOpcode() == ISD::AND) { if (Parent->getOpcode() == ISD::ADD || Parent->getOpcode() == ISD::FADD || Parent->getOpcode() == ISD::MUL || Parent->getOpcode() == ISD::SHL || Parent->getOpcode() == ISD::AND) { if (Child->getOpcode() == ISD::ADD || Child->getOpcode() == ISD::FADD || Child->getOpcode() == ISD::MUL || Child->getOpcode() == ISD::SHL || Child->getOpcode() == ISD::AND) { } } } }" LLVM,NVPTX,232, LLVM,NVPTX,233,"unsigned getFunctionParamOptimizedAlign(Function *F, unsigned Align) { if (F->hasNonLocalLinkage()) { Align = std::max(Align, 16u); } return 
Align; }" LLVM,NVPTX,234,"void PerformDAGCombine(DAGCombine &Combine) { Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::ADD); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::FADD); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::MUL); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::SHL); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::AND); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::UREM); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::SREM); Combine.addTargetSpecificValue(NVPTX, NVPTX, ISD::SETCC); Combine.addTargetIndependentValue(NVPTX, NVPTXISD::StoreRetval); Combine.addTargetIndependentValue(NVPTX, NVPTXISD::StoreRetvalV2); Combine.addTargetIndependentValue(NVPTX, NVPTXISD::StoreRetvalV4); }" LLVM,NVPTX,235,"bool isLegalAddressingMode(const Target& T, const AddressingMode& AM, const Type& Ty) { if (T.getTargetID() == NVPTX) { if (AM.getAddressingMode() == AddressingMode::NVPTX) { if (Ty.getTypeID() == Type::NVPTX) { return true; } } } return false; }" LLVM,NVPTX,236,bool shouldExpandAtomicRMWInIR(AtomicRMWInst *ARMW) { if (!ARMW->getType()->isIntegerTy()) { return false; } if (ARMW->getType()->getIntegerBitWidth() != 8 && ARMW->getType()->getIntegerBitWidth() != 16 && ARMW->getType()->getIntegerBitWidth() != 32 && ARMW->getType()->getIntegerBitWidth() != 64) { return false; } if (ARMW->getOperation() != AtomicRMWInst::Add && ARMW->getOperation() != AtomicRMWInst::Sub && ARMW->getOperation() != AtomicRMWInst::And && ARMW->getOperation() != AtomicRMWInst::Or && ARMW->getOperation() != AtomicRMWInst::Xor && ARMW->getOperation() != AtomicRMWInst::Min && ARMW->getOperation() != AtomicRMWInst::Max) { return false; } if (ARMW->getSyncScope() != AtomicRMWInst::SyncScope::None && ARMW->getSyncScope() != AtomicRMWInst::SyncScope::System) { return false; } LLVM,NVPTX,237, LLVM,NVPTX,238,"void ReplaceNodeResults(LLVMContext &Context, ISD::Node *Node) { if (Node->getResultType()->isIllegal()) { ISD::Node *NewNode = nullptr; switch (Node->getOpcode()) { case ISD::LOAD: NewNode = createCustomLoad(Context, Node); break; case ISD::INTRINSIC_W_CHAIN: NewNode = createCustomIntrinsic(Context, Node); break; default: NewNode = createCustomNode(Context, Node); break; } Node->replaceAllUsesWith(NewNode); Node->eraseFromParent(); } } ISD::Node *createCustomLoad(LLVMContext &Context, ISD::Node *Node) { ISD::Node *NewNode = ISD::Node::create(Context, ISD::LOAD, Node->getOperand(0), Node->getOperand(1)); NewNode->setResultType(Node->getResultType()); return NewNode; }" LLVM,NVPTX,239,"static InlineAsmRegClass getRegForInlineAsmConstraint(const InlineAsmConstraint &Constraint) { StringRef TargetSpecificValue = Constraint.getTargetSpecificValue(); if (TargetSpecificValue.startsWith(""NVPTX::"")) { StringRef RegClass = TargetSpecificValue.substr(6); if (RegClass == ""Int8RegsRegClass"" || RegClass == ""Int16RegsRegClass"" || RegClass == ""Int32RegsRegClass"" || RegClass == ""Int64RegsRegClass"" || RegClass == ""Float32RegsRegClass"" || RegClass == ""Float64RegsRegClass"") { return InlineAsmRegClass::get(RegClass); } } return InlineAsmRegClass::getEmpty(); }" LLVM,NVPTX,240, LLVM,NVPTX,241,"bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::nvvm_atomic_load_add_f32: case Intrinsic::nvvm_atomic_load_inc_32: case Intrinsic::nvvm_atomic_load_dec_32: case Intrinsic::nvvm_ldu_global_i: case Intrinsic::nvvm_ldu_global_f: case Intrinsic::nvvm_ldu_global_p: return true; default: return false; } } ``` This function uses a `switch` statement to check the `Intrinsic::ID` of the given 
intrinsic and return `true` if it is one of the intrinsics that touches memory, and `false` otherwise. You can use this function in your code like this: ``` if (getTgtMemIntrinsic(intrinsic)) { } else { }" LLVM,NVPTX,242," unsigned W, unsigned X, unsigned Y, unsigned Z) { Value *RetVal = Outs[0]; DAG *DAG = Context.getDAG(); DAG->addNode(DAG->createNode(ISD::EXTRACT_VECTOR_ELT, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::StoreRetval, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::MoveToRetval, RetVal, 0)); DAG->setTargetSpecificValue(MVT::Other, MVT::i32, 8, 8, NVPTXISD::RET_FLAG); }" LLVM,NVPTX,243, LLVM,NVPTX,244,"bool isLegalAddressingMode(const Target& T, const AddressingMode& AM, const Type& Ty) { if (T.getTargetID() == NVPTX) { if (AM.getAddressingMode() == AddressingMode::NVPTX) { if (Ty.getTypeID() == Type::NVPTX) { return true; } } } return false; }" LLVM,NVPTX,245,"static InlineAsmRegClass getRegForInlineAsmConstraint(const InlineAsmConstraint &Constraint) { StringRef TargetSpecificValue = Constraint.getTargetSpecificValue(); if (TargetSpecificValue.startsWith(""NVPTX::"")) { StringRef RegClass = TargetSpecificValue.substr(6); if (RegClass == ""Int8RegsRegClass"" || RegClass == ""Int16RegsRegClass"" || RegClass == ""Int32RegsRegClass"" || RegClass == ""Int64RegsRegClass"" || RegClass == ""Float32RegsRegClass"" || RegClass == ""Float64RegsRegClass"") { return InlineAsmRegClass::get(RegClass); } } return InlineAsmRegClass::getEmpty(); }" LLVM,NVPTX,246," unsigned W, unsigned X, unsigned Y, unsigned Z) { Value *RetVal = Outs[0]; DAG *DAG = Context.getDAG(); DAG->addNode(DAG->createNode(ISD::EXTRACT_VECTOR_ELT, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::StoreRetval, RetVal, 0)); DAG->addNode(DAG->createNode(NVPTXISD::MoveToRetval, RetVal, 0)); DAG->setTargetSpecificValue(MVT::Other, MVT::i32, 8, 8, NVPTXISD::RET_FLAG); }" LLVM,NVPTX,247,"bool combineRepeatedFPDivisors(const Target& target) { if (target.getTargetSpecificValue() == NVPTX,2) { return true; } return false; }" LLVM,NVPTX,248,bool enableAggressiveFMAFusion(Type *Ty) { if (Ty->getTarget() != Target::NVPTX) return false; if (!Ty->isFloatingPoint()) return false; if (!Ty->isVector()) return false; if (Ty->getScalarSizeInBits() != 32 && Ty->getScalarSizeInBits() != 64) return false; if (Ty->getVectorNumElements() != 2 && Ty->getVectorNumElements() != 4) return false; if (Ty->getScalarType() != Type::FloatTy) return false; if (Ty->getVectorElementType() != Type::FloatTy) return false; if (Ty->getScalarSizeInBits() != 32 && Ty->getScalarSizeInBits() != 64) return false; if (Ty->getVectorNumElements() != 2 && Ty->getVectorNumElements() != 4) return false; return true; } LLVM,NVPTX,249,"Type* getScalarShiftAmountTy(Type* ShiftedTy, Type* AmountTy) { if (ShiftedTy->isIntegerTy() && AmountTy->isIntegerTy()) { return ShiftedTy; } else if (ShiftedTy->isPointerTy() && AmountTy->isIntegerTy()) { return ShiftedTy->getPointerElementType(); } else { return nullptr; } }" LLVM,NVPTX,250,"MVT getSetCCResultType(MVT::Type type) { switch (type) { case MVT::i1: return MVT::i1; case MVT::i8: return MVT::i8; case MVT::i16: return MVT::i16; case MVT::i32: return MVT::i32; case MVT::i64: return MVT::i64; case MVT::f32: return MVT::f32; case MVT::f64: return MVT::f64; default: llvm_unreachable(""Invalid type for SETCC""); } }" LLVM,NVPTX,251,"bool isCheapToSpeculateCtlz(Function *F, TargetMachine *TM) { if (F->getIntrinsicID() != Intrinsic::ctlz) return false; if (!TM->hasFeature(TargetFeature::CTLZ)) return false; if 
(TM->getCost(Intrinsic::ctlz) > 10) return false; return true; }" LLVM,NVPTX,252," const Instruction *FMul, const Instruction *FAdd) { if (FMA->getOpcode() != Instruction::FMA) return false; if (FMul->getOpcode() != Instruction::FMUL || FAdd->getOpcode() != Instruction::FADD) return false; if (FMA->getLatency() > FMul->getLatency() + FAdd->getLatency()) return true; if (FMA->getThroughput() > FMul->getThroughput() + FAdd->getThroughput()) return true; return false; }" LLVM,NVPTX,253, LLVM,NVPTX,254,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""Lower aggregate copies/intrinsics into loops""; } else { return P->getPassName(); } }" LLVM,NVPTX,255,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,256,"class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { TargetSelect.addTarget(TargetSelect.NVPTX, 4, 4, 0, 0, 0, 0, 1); return false; }" LLVM,NVPTX,257,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""Lower aggregate copies/intrinsics into loops""; } else { return P->getPassName(); } }" LLVM,NVPTX,258,"class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { TargetSelect.addTarget(TargetSelect.NVPTX, 4, 4, 0, 0, 0, 0, 1); return false; }" LLVM,NVPTX,259,"class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { TargetSelect.addTarget(TargetSelect.NVPTX, 4, 4, 0, 0, 0, 0, 1); return false; }" LLVM,NVPTX,260,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""Lower aggregate copies/intrinsics into loops""; } else { return P->getPassName(); } }" LLVM,NVPTX,261,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""NVPTX - Convert address space of alloca'ed memory to local""; } return P->getPassName(); }" LLVM,NVPTX,262,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""NVPTX - Convert address space of alloca'ed memory to local""; } return P->getPassName(); }" LLVM,NVPTX,263,"void runOnFunction(Function &F) { NVPTX NVPTX; NVPTX.setTargetTriple(""nvptx64-nvidia-cuda""); NVPTX.setTargetArch(""sm_35""); NVPTX.setTargetABI(""nvptx""); NVPTX.setTargetOS(""linux""); NVPTX.setTargetEnvironment(""cuda""); Function *F = &F; F->setName(""my_function""); F->setReturnType(Type::getVoidTy(F->getContext())); F->setFunctionType(FunctionType::get(F->getReturnType(), {}, false)); runOnFunctionImpl(F, NVPTX); }" LLVM,NVPTX,264,"void runOnFunction(Function &F) { NVPTX NVPTX; NVPTX.setTargetTriple(""nvptx64-nvidia-cuda""); NVPTX.setTargetArch(""sm_35""); NVPTX.setTargetABI(""nvptx""); NVPTX.setTargetOS(""linux""); NVPTX.setTargetEnvironment(""cuda""); Function *F = &F; F->setName(""my_function""); F->setReturnType(Type::getVoidTy(F->getContext())); F->setFunctionType(FunctionType::get(F->getReturnType(), {}, false)); runOnFunctionImpl(F, NVPTX); }" LLVM,NVPTX,265,"void runOnFunction(Function &F) { NVPTX NVPTX; NVPTX.setTargetTriple(""nvptx64-nvidia-cuda""); NVPTX.setTargetArch(""sm_35""); NVPTX.setTargetABI(""nvptx""); NVPTX.setTargetOS(""linux""); NVPTX.setTargetEnvironment(""cuda""); Function *F = &F; F->setName(""my_function""); F->setReturnType(Type::getVoidTy(F->getContext())); 
F->setFunctionType(FunctionType::get(F->getReturnType(), {}, false)); runOnFunctionImpl(F, NVPTX); }" LLVM,NVPTX,266,"void runOnFunction(Function &F) { NVPTX NVPTX; NVPTX.setTargetTriple(""nvptx64-nvidia-cuda""); NVPTX.setTargetArch(""sm_35""); NVPTX.setTargetABI(""nvptx""); NVPTX.setTargetOS(""linux""); NVPTX.setTargetEnvironment(""cuda""); Function *F = &F; F->setName(""my_function""); F->setReturnType(Type::getVoidTy(F->getContext())); F->setFunctionType(FunctionType::get(F->getReturnType(), {}, false)); runOnFunctionImpl(F, NVPTX); }" LLVM,NVPTX,267,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""Lower pointer arguments of CUDA kernels""; } else { return P->getPassName(); } }" LLVM,NVPTX,268,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,269,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""Lower pointer arguments of CUDA kernels""; } else { return P->getPassName(); } }" LLVM,NVPTX,270, LLVM,NVPTX,271, LLVM,NVPTX,272,"const char *getPassName(const Pass *pass) { if (pass->getPassKind() == PassKind::NVPTX) { return ""NVPTX Copy structure (byval *) arguments to stack""; } else { return pass->getPassName(); } }" LLVM,NVPTX,273,class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) { return false; } LLVM,NVPTX,274,"const char *getImageHandleSymbol(int index) { switch (index) { case 0: return ""__nvptx_image_handle_0""; case 1: return ""__nvptx_image_handle_1""; case 2: return ""__nvptx_image_handle_2""; case 3: return ""__nvptx_image_handle_3""; case 4: return ""__nvptx_image_handle_4""; case 5: return ""__nvptx_image_handle_5""; case 6: return ""__nvptx_image_handle_6""; case 7: return ""__nvptx_image_handle_7""; case 8: return ""__nvptx_image_handle_8""; case 9: return ""__nvptx_image_handle_9""; case 10: return ""__nvptx_image_handle_10""; case 11: return ""__nvptx_image_handle_11""; case 12: return ""__nvptx_image_handle_12""; case 13: return ""__nvptx_image_handle_13""; case 14: return ""__nvptx_image_handle_14""; case 15: return ""__nvptx_image_handle_15""; default: return ""Bad index""; } }" LLVM,NVPTX,275,unsigned getImageHandleSymbolIndex(const char *Symbol) { const llvm::SymbolTable *SymTab = llvm::getSymbolTable(llvm::NVPTX); const llvm::Symbol *Sym = SymTab->lookup(Symbol); if (!Sym) { return 0; } unsigned Index = Sym->getIndex(); return Index; } LLVM,NVPTX,276,"bool shouldOmitSectionDirective(StringRef SectionName) { if (SectionName.contains(""NVPTX"")) { return true; } return false; } ``` This function takes a `StringRef` argument `SectionName` and checks if it contains the NVPTX target specific value. If it does, the function returns `true` to omit the `.section` directive, otherwise it returns `false` to include the directive. You can use this function in the `NVPTXTargetAsmInfo` class to implement the `shouldOmitSectionDirective` method, like this: ``` class NVPTXTargetAsmInfo : public TargetAsmInfo { ... 
bool shouldOmitSectionDirective(StringRef SectionName) const override { return shouldOmitSectionDirective(SectionName); }" LLVM,NVPTX,277,"bool classof(Type *Ty, const NVPTX::Type *NVPTXTy) { if (Ty->isPointerTy()) { if (NVPTXTy->isPointerTy()) { return true; } } else if (Ty->isIntegerTy()) { if (NVPTXTy->isIntegerTy()) { return true; } } else if (Ty->isFloatingPointTy()) { if (NVPTXTy->isFloatingPointTy()) { return true; } } else if (Ty->isArrayTy()) { if (NVPTXTy->isArrayTy()) { return true; } } else if (Ty->isStructTy()) { if (NVPTXTy->isStructTy()) { return true; } } else if (Ty->isFunctionTy()) { if (NVPTXTy->isFunctionTy()) { return true; } } return false; }" LLVM,NVPTX,278,"static const char *findAssociatedFragment(const Value *V) { if (isa(V)) { return ""absolute""; } const Instruction *I = dyn_cast(V); if (!I) { return nullptr; } const BasicBlock *BB = I->getParent(); if (!BB) { return nullptr; } const Function *F = BB->getParent(); if (!F) { return nullptr; } const Module *M = F->getParent(); if (!M) { return nullptr; } for (const GlobalValue &GV : M->globals()) { if (GV.getName() == I->getName()) { return GV.getSection(); } } return nullptr; }" LLVM,NVPTX,279,APFloat getAPFloat(Value *V) { if (auto *C = dyn_cast(V)) { return APFloat(C->getValueAPF()); } else if (auto *I = dyn_cast(V)) { if (I->getOpcode() == Instruction::FPToFP) { return APFloat(I->getOperand(0)->getValueAPF()); } else { return APFloat(I->getOperand(0)->getValueAPF()); } } else { return APFloat(0.0); } } LLVM,NVPTX,280,Kind getKind(unsigned laneOffset) { switch (laneOffset) { case 0: return Kind::Lane0; case 1: return Kind::Lane1; case 2: return Kind::Lane2; case 3: return Kind::Lane3; case 4: return Kind::Lane4; case 5: return Kind::Lane5; case 6: return Kind::Lane6; case 7: return Kind::Lane7; default: return Kind::Invalid; } } LLVM,NVPTX,281,Value *getSymbolExpr(Value *V) { if (auto *I = dyn_cast(V)) { if (I->getOpcode() == Instruction::GetElementPtr) { return I->getOperand(0); } else if (I->getOpcode() == Instruction::Call) { return I->getOperand(0); } } else if (auto *C = dyn_cast(V)) { if (C->getType()->isPointerTy()) { return C; } } return nullptr; } LLVM,NVPTX,282,struct NVPTX { int NVPTX; int NVPTX; int NVPTX; int NVPTX; }; NVPTX create() { NVPTX nvptx; nvptx.NVPTX = 0; nvptx.NVPTX = 0; nvptx.NVPTX = 0; nvptx.NVPTX = 0; return nvptx; } LLVM,NVPTX,283,struct NVPTX { int NVPTX; int NVPTX; int NVPTX; int NVPTX; }; NVPTX create() { NVPTX nvptx; nvptx.NVPTX = 0; nvptx.NVPTX = 0; nvptx.NVPTX = 0; nvptx.NVPTX = 0; return nvptx; } LLVM,NVPTX,284,"void Create(LLVMContext& Context, Opcode Op, Operand* Operands, unsigned NumOperands, Instruction* Inst) { if (!IsSupportedOpcode(Op)) { return; } Instruction* UnaryInst = new UnaryInstruction(Op, Operands, NumOperands, Inst); UnaryInst->setTargetSpecificValue(NVPTX, NVPTX, NVPTX, NVPTX); Context.addInstruction(UnaryInst); }" LLVM,NVPTX,285,"const char *getPassName(const Pass *pass) { if (pass->getType() == NVPTX) { return ""NVPTX optimize redundant cvta.to.local instruction""; } else { return ""Unknown pass""; } }" LLVM,NVPTX,286,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const NVPTXTargetMachine *NVTM = static_cast(TM); const NVPTXSubtarget *ST = NVTM->getSubtarget(); const NVPTXTargetInfo *TI = ST->getTargetInfo(); const VRFrame *VRF = TI->getVRFrame(); MF.emitFunctionBody(VRF); } LLVM,NVPTX,287,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const 
NVPTXTargetMachine *NVTM = static_cast(TM); const NVPTXSubtarget *ST = NVTM->getSubtarget(); const NVPTXTargetInfo *TI = ST->getTargetInfo(); const VRFrame *VRF = TI->getVRFrame(); MF.emitFunctionBody(VRF); } LLVM,NVPTX,288,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const NVPTXTargetMachine *NVTM = static_cast(TM); const NVPTXSubtarget *ST = NVTM->getSubtarget(); const NVPTXTargetInfo *TI = ST->getTargetInfo(); const VRFrame *VRF = TI->getVRFrame(); MF.emitFunctionBody(VRF); } LLVM,NVPTX,289,"void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXSubtarget &ST = MF.getSubtarget(); MF.emitFunctionBody(T, ST); }" LLVM,NVPTX,290,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,291,"const char *getPassName(const Pass *pass) { if (pass->getType() == NVPTX) { return ""NVPTX optimize redundant cvta.to.local instruction""; } else { return ""Unknown pass""; } }" LLVM,NVPTX,292,"void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXSubtarget &ST = MF.getSubtarget(); MF.emitFunctionBody(T, ST); }" LLVM,NVPTX,293,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for (auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,294,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for (auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,295,"void runOnMachineFunction(MachineFunction &MF) { unsigned int FrameIndices[3]; for (unsigned int i = 0; i < 3; i++) { FrameIndices[i] = MF.getFrameIndex(i); } for (unsigned int i = 0; i < 3; i++) { MF.emitDebugValue(FrameIndices[i], i); } }" LLVM,NVPTX,296, LLVM,NVPTX,297,"void runOnMachineFunction(MachineFunction &MF) { unsigned int FrameIndices[3]; for (unsigned int i = 0; i < 3; i++) { FrameIndices[i] = MF.getFrameIndex(i); } for (unsigned int i = 0; i < 3; i++) { MF.emitDebugValue(FrameIndices[i], i); } }" LLVM,NVPTX,298,"void runOnMachineFunction(MachineFunction &MF) { unsigned int FrameIndices[3]; for (unsigned int i = 0; i < 3; i++) { FrameIndices[i] = MF.getFrameIndex(i); } for (unsigned int i = 0; i < 3; i++) { MF.emitDebugValue(FrameIndices[i], i); } }" LLVM,NVPTX,299,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for 
(auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,300,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for (auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,301,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for (auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,302,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for (auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,303, LLVM,NVPTX,304,"void runOnMachineFunction(MachineFunction &MF) { TargetSpecificValue *NVPTX = MF.getTargetSpecificValue(); for (auto &BB : MF.getBasicBlocks()) { for (auto &I : BB.getInstructions()) { if (I.getOpcode() == Instruction::DBG_VALUE) { DebugOperand *DO = I.getDebugOperand(); if (DO->getKind() == DebugOperand::FrameIndex) { MF.emitInstruction(new MachineInstruction( Instruction::DBG_VALUE, DO->getFrameIndex(), DO->getType(), DO->getDebugLoc())); } } } } }" LLVM,NVPTX,305,"void runOnMachineFunction(MachineFunction &MF) { TargetSpecificValue *NVPTX = MF.getTargetSpecificValue(); for (auto &BB : MF.getBasicBlocks()) { for (auto &I : BB.getInstructions()) { if (I.getOpcode() == Instruction::DBG_VALUE) { DebugOperand *DO = I.getDebugOperand(); if (DO->getKind() == DebugOperand::FrameIndex) { MF.emitInstruction(new MachineInstruction( Instruction::DBG_VALUE, DO->getFrameIndex(), DO->getType(), DO->getDebugLoc())); } } } } }" LLVM,NVPTX,306,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const TargetMachine &TM = MF.getTargetMachine(); const NVPTXTargetMachine &NVTM = static_cast(TM); const NVPTXSubtarget &ST = NVTM.getSubtarget(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); IRBuilder<> Builder(BB); Builder.CreatePrologue(F); for (auto &BB : F->getBasicBlocks()) { Builder.CreateBlockPrologue(BB); for (auto &I : BB->getInstructions()) { Builder.CreateInstruction(I); } Builder.CreateBlockEpilogue(BB); } Builder.CreateEpilogue(F); } LLVM,NVPTX,307,"const char *getPassName(const Pass *P) { if (P->getPassKind() == PassKind::NVPTX) { return ""NVPTX Proxy Register Instruction Erasure""; } else { 
return P->getPassName(); } }" LLVM,NVPTX,308, LLVM,NVPTX,309,"StringRef getName(const NVPTXTargetMachine &TM) { StringRef targetSpecificValue = TM.getTargetSpecificValue(); SmallVector vfs; SmallVector ufs; TM.getApplicableVFsAndUFs(vfs, ufs); std::string name = ""NVPTX""; for (auto vf : vfs) { name += "" "" + vf.str(); } for (auto uf : ufs) { name += "" "" + uf.str(); } return name; }" LLVM,NVPTX,310,"void eliminateFrameIndex(Instruction *I) { if (I->getOpcode() == Instruction::Call && I->getOperand(0)->getType()->isPointerTy() && I->getOperand(0)->getType()->getPointerAddressSpace() == NVPTX::VRFrame) { I->setOperand(0, ConstantInt::get(I->getContext(), 0)); } }" LLVM,NVPTX,311,"static const char *getFrameRegister(const NVPTX::VRFrame &vrFrame) { const char *frameRegister = vrFrame.getFrameRegister(); if (!frameRegister) { return """"; } return frameRegister; }" LLVM,NVPTX,312,"BitVector getReservedRegs(const Module &M) { BitVector reservedRegs(M.getContext(), M.getNumberOfRegisters()); reservedRegs.set(NVPTX::ENVREG0); reservedRegs.set(NVPTX::ENVREG31); reservedRegs.set(NVPTX::VRFrame32); reservedRegs.set(NVPTX::VRFrameLocal32); reservedRegs.set(NVPTX::VRFrame64); reservedRegs.set(NVPTX::VRFrameLocal64); reservedRegs.set(NVPTX::VRDepot); return reservedRegs; }" LLVM,NVPTX,313,static const Target::RegClass *getCalleeSavedRegs(const NVPTXTargetMachine &TM) { const Target::RegClass *RegClass = TM.getTargetSpecificValue(); if (!RegClass) return nullptr; return RegClass->getCalleeSavedRegs(); } LLVM,NVPTX,314, LLVM,NVPTX,315,"static const RegisterInfo NVPTXRegisterInfo[] = { }; static const TargetInfo TheNVPTXTargetInfo(NVPTXTriple, NVPTXRegisterInfo); static const Target TheNVPTXTarget(&TheNVPTXTargetInfo); static const RegisterInfo *getRARegister(const MachineFunction &MF) { return &NVPTXRegisterInfo[NVPTX::RA]; }" LLVM,NVPTX,316,"void eliminateFrameIndex(Instruction *I) { if (I->getOpcode() == Instruction::Call && I->getOperand(0)->getType()->isPointerTy() && I->getOperand(0)->getType()->getPointerAddressSpace() == NVPTX::VRFrame) { I->setOperand(0, ConstantInt::get(I->getContext(), 0)); } }" LLVM,NVPTX,317,static const Target::RegClass *getCalleeSavedRegs(const NVPTXTargetMachine &TM) { const Target::RegClass *RegClass = TM.getTargetSpecificValue(); if (!RegClass) return nullptr; return RegClass->getCalleeSavedRegs(); } LLVM,NVPTX,318, LLVM,NVPTX,319,"void eliminateFrameIndex(Instruction *I) { if (I->getOpcode() == Instruction::Call && I->getOperand(0)->getType()->isPointerTy() && I->getOperand(0)->getType()->getPointerAddressSpace() == NVPTX::VRFrame) { I->setOperand(0, ConstantInt::get(I->getContext(), 0)); } }" LLVM,NVPTX,320,static const Target::RegClass *getCalleeSavedRegs(const NVPTXTargetMachine &TM) { const Target::RegClass *RegClass = TM.getTargetSpecificValue(); if (!RegClass) return nullptr; return RegClass->getCalleeSavedRegs(); } LLVM,NVPTX,321,"static const char *getFrameRegister(const NVPTX::VRFrame &vrFrame) { const char *frameRegister = vrFrame.getFrameRegister(); if (!frameRegister) { return """"; } return frameRegister; }" LLVM,NVPTX,322,"void eliminateCallFramePseudoInstr(Function &F, TargetMachine &TM) { if (!TM.getTarget().hasFeature(""nvptx-call-frame-setup"")) return; for (auto &I : instructions(F)) { if (I.getOpcode() == Instruction::CallFrameSetup || I.getOpcode() == Instruction::CallFrameDestroy) { I.replaceAllUsesWith(UndefValue::get(I.getType())); I.eraseFromParent(); } } }" LLVM,NVPTX,323,void eliminateFrameIndex(Instruction *Instr) { if 
(!Instr->hasFrameIndex()) { return; } auto FrameIndex = Instr->getFrameIndex(); if (FrameIndex->isConstant()) { Instr->replaceFrameIndex(FrameIndex->getConstant()); } else { Instr->replaceFrameIndex(NVPTX::VRFrame); } } LLVM,NVPTX,324,"void eliminateFrameIndex(Instruction *I) { if (I->getOpcode() == Instruction::Call && I->getOperand(0)->getType()->isAbstractFrameIndex()) { I->setOperand(0, new ConstantInt(0)); } }" LLVM,NVPTX,325,static const Target::RegClass *getCalleeSavedRegs(const NVPTXTargetMachine &TM) { const Target::RegClass *RegClass = TM.getTargetSpecificValue(); if (!RegClass) return nullptr; return RegClass->getCalleeSavedRegs(); } LLVM,NVPTX,326,"Value *getFrameRegister(Module *M, Type *Ty) { if (!M->getDebugInfo()) { return nullptr; } DebugInfo *DI = M->getDebugInfo(); if (!DI->hasFrameRegister()) { return nullptr; } Value *FrameRegister = DI->getFrameRegister(); if (FrameRegister->getType()->isPointerTy() && (FrameRegister->getType()->getPointerElementType()->isIntegerTy(64) || FrameRegister->getType()->getPointerElementType()->isIntegerTy(32))) { return FrameRegister; } return nullptr; }" LLVM,NVPTX,327,"static BitVector getReservedRegs(const Target &T) { BitVector reservedRegs(T.getNumRegs()); reservedRegs.set(T.getRegNum(""NVPTX"")); reservedRegs.set(T.getRegNum(""NVPTX"")); return reservedRegs; }" LLVM,NVPTX,328,"const char *getPassName(const char *passName) { if (passName == ""NVPTX"") { return ""NVPTX Replace Image Handles""; } else { return passName; } }" LLVM,NVPTX,329,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXTargetInfo &NVPTX = T.getNVPTXTargetInfo(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,NVPTX,330,"const char *getPassName(const char *passName) { if (passName == ""NVPTX"") { return ""NVPTX Replace Image Handles""; } else { return passName; } }" LLVM,NVPTX,331,"void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const Target &T = TM->getTarget(); const TargetData *TD = TM->getTargetData(); const TargetInstrInfo *TII = TM->getTargetInstrInfo(); const TargetRegisterInfo *TRI = TM->getTargetRegisterInfo(); const TargetSubtargetInfo *TSI = TM->getTargetSubtargetInfo(); Function *F = MF.getFunction(); BasicBlock *BB = F->getEntryBlock(); Instruction *I = BB->getFirstNonPHI(); emitFunctionPrologue(MF, BB, I); while (I) { Instruction *Next = I->getNextNode(); if (I->getOpcode() == Instruction::Call) { emitCallInstruction(MF, I, Next); } else if (I->getOpcode() == Instruction::Ret) { emitReturnInstruction(MF, I, Next); } else { emitInstruction(MF, I, Next); } I = Next; } emitFunctionEpilogue(MF, BB, I); }" LLVM,NVPTX,332,"const char *getPassName(const char *passName) { if (passName == ""NVPTX"") { return ""NVPTX Replace Image Handles""; } else { return passName; } }" LLVM,NVPTX,333,void runOnMachineFunction(MachineFunction &MF) { const Target &T = MF.getTarget(); const NVPTXTargetInfo &NVPTX = T.getNVPTXTargetInfo(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,NVPTX,334,"bool isVirtualSection(const Section &section) { if (section.getSize() == 0) { return true; } if (section.getType() == SectionType::NVPTX) { return true; } return false; } ``` This function takes a `Section` object as an argument and returns a boolean value indicating whether the section is virtual or not. It checks if the section has no actual object file contents and if it has a NVPTX-specific value. 
If either of these conditions is true, the function returns `true`, indicating that the section is virtual. Otherwise, it returns `false`. You can use this function in your LLVM Compiler code to check if a section is virtual or not. For example, you could use it in a function that processes sections to determine which ones are virtual and which ones are not. ``` void processSections(const Module &module) { for (const Section &section : module.sections()) { if (isVirtualSection(section)) { } else { } } }" LLVM,NVPTX,335,"bool isVirtualSection(const Section &section) { if (section.getSize() == 0) { return true; } if (section.getType() == SectionType::NVPTX) { return true; } return false; } ``` This function takes a `Section` object as an argument and returns a boolean value indicating whether the section is virtual or not. It checks if the section has no actual object file contents and if it has a NVPTX-specific value. If either of these conditions is true, the function returns `true`, indicating that the section is virtual. Otherwise, it returns `false`. You can use this function in your LLVM Compiler code to check if a section is virtual or not. For example, you could use it in a function that processes sections to determine which ones are virtual and which ones are not. ``` void processSections(const Module &module) { for (const Section &section : module.sections()) { if (isVirtualSection(section)) { } else { } } }" LLVM,NVPTX,336,void getAnalysisUsage(AnalysisUsage &AU) { NVPTXBackendBase::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); } LLVM,NVPTX,337,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,338,"const char *getPassName(const Pass *P) { if (P->getKind() == PassKind::NVPTX) { return ""Split basic blocks at barrier""; } else { return P->getName(); } }" LLVM,NVPTX,339,"void runOnFunction(Function &F) override { auto target = getAnalysis().getTarget(); if (target->getTriple().getArch() != Triple::nvptx || target->getTriple().getSubArch() != Triple::nvptx64 || target->getTriple().getVendor() != Triple::nvidia) { return; } auto funcName = F.getName(); if (funcName.startsWith(""bar_split"")) { auto args = F.getArguments(); if (args.size() != 4) { return; }" LLVM,NVPTX,340," const std::string &FeatureString) { if (Target.getTriple().getArch() == llvm::Triple::nvptx) { Target.setTargetSpecificValue(llvm::Triple::nvptx, ""sm_20"", 0, 32); } }" LLVM,NVPTX,341, LLVM,NVPTX,342,TargetInstrInfo *getInstrInfo(const Target &T) { if (T.getTargetID() == Target::NVPTX) { return new NVPTXInstrInfo(); } else { return nullptr; } } LLVM,NVPTX,343,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,344," const std::string &FeatureStr) { if (Target.getArch() == Triple::nvptx) { Target.setTargetSpecificValue(Triple::nvptx, ""sm_20"", 0, 32); Target.setSubtargetDependency(Triple::nvptx, ""sm_20"", 0, 32); } }" LLVM,NVPTX,345, LLVM,NVPTX,346,"class NVPTXSubtarget : public NVPTXTargetMachine { public: NVPTXSubtarget(const Module &M, const TargetMachine &TM) : NVPTXTargetMachine(M, TM) { setTargetTriple(""nvptx-unknown-unknown""); setTargetArch(""nvptx""); setTargetCPU(""nvptx""); setTargetABI(""nvptx""); setTargetVersion(0); setTargetFeatures(""sm_10""); setTargetFeatureStr(""we are not using FeatureStr""); setTargetFPMath(""0""); setTargetFPMathStr(""31""); }"
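The getDataLayout records that follow hard-code a PTX-style data-layout string. As a minimal, self-contained sketch (not part of the dataset and not the actual NVPTX backend code), the same string can be handed to LLVM's stock DataLayout class and queried directly; exact layout strings vary between LLVM releases, so the literal below is only illustrative: ```
// Sketch: feed an NVPTX-style layout string (copied from the getDataLayout
// snippet below) to llvm::DataLayout and query it. Requires linking against
// LLVMCore/LLVMSupport; the exact string may differ in newer LLVM releases.
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::DataLayout DL(
      "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
      "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
      "n16:32:64");

  llvm::outs() << "little-endian:       " << DL.isLittleEndian() << "\n";
  llvm::outs() << "pointer size (bits): " << DL.getPointerSizeInBits() << "\n";
  llvm::outs() << "largest legal int:   " << DL.getLargestLegalIntTypeSizeInBits() << "\n";
  llvm::outs() << "canonical form:      " << DL.getStringRepresentation() << "\n";
  return 0;
}
``` The layout string is how a target such as NVPTX advertises pointer width, type alignments and native integer sizes to target-independent passes, which is why several of the getDataLayout variants in this section simply return the module's existing layout.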
LLVM,NVPTX,347, LLVM,NVPTX,348,"DataLayout getDataLayout(Module &M) { const std::string &Triple = M.getTargetTriple(); DataLayout DL(Triple); DL.setTargetSpecific(NVPTX, ""e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64""); return DL; }" LLVM,NVPTX,349, LLVM,NVPTX,350," const std::string &FeatureString) { if (Target.getTriple().getArch() == llvm::Triple::nvptx) { Target.setTargetSpecificValue(llvm::Triple::nvptx, ""sm_20"", 0, 32); } }" LLVM,NVPTX,351, LLVM,NVPTX,352," const std::string &FeatureString) { if (Target.getTriple().getArch() == llvm::Triple::nvptx) { Target.setTargetSpecificValue(llvm::Triple::nvptx, ""sm_20"", 0, 32); } }" LLVM,NVPTX,353, NVPTXTargetMachineTargetInfo(M->getNVPTXTargetMachineTargetInfo()) { } LLVM,NVPTX,354,DataLayout getDataLayout(Module &M) { return M.getDataLayout(); } LLVM,NVPTX,355, LLVM,NVPTX,356, NVPTXTargetMachineTargetInfo(M->getNVPTXTargetMachineTargetInfo()) { } LLVM,NVPTX,357,DataLayout getDataLayout(Module &M) { return M.getDataLayout(); } LLVM,NVPTX,358,TargetInstrInfo *getInstrInfo(const Target &T) { if (T.getTargetID() == Target::NVPTX) { return new NVPTXInstrInfo(); } else { return nullptr; } } LLVM,NVPTX,359,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,360, NVPTXTargetMachineTargetInfo(M->getNVPTXTargetMachineTargetInfo()) { } LLVM,NVPTX,361,"void addFastRegAlloc(LLVMContext &Context, NVPTXTargetMachine &Target) { Context.addPass(new TargetIndependentRegAllocPass()); Target.addPass(new NVPTXRegAllocPass()); }" LLVM,NVPTX,362, LLVM,NVPTX,363, LLVM,NVPTX,364,"void addOptimizedRegAlloc(LLVMContext &Context, PassManager &PM) { PM.add(new NVPTXRegAllocPass()); PM.add(new NVPTXStackSlotColoringPass()); PM.add(new NVPTXMachineSchedulingPass()); }" LLVM,NVPTX,365,"void addPostRegAlloc(LLVMContext &Context, Function &F) { const char *NVPTX = ""NVPTX""; RegisterAllocationPass *RAP = F.getRegisterAllocationPass(); if (!RAP) { return; } ProloguePass *PP = F.getProloguePass(); EpiloguePass *EP = F.getEpiloguePass(); if (!PP || !EP) { return; } const char *NVPTX = ""NVPTX""; FunctionPass *NewPass = new FunctionPass(Context, NVPTX); F.addPass(NewPass); F.setCurrentPass(NewPass); NewPass->runOnFunction(F); F.removePass(NewPass); }" LLVM,NVPTX,366,"void addPreRegAlloc(LLVMContext &Context, Function &F) { const Target &NVPTXTarget = Context.getTarget(); const TargetMachine &NVPTXTargetMachine = NVPTXTarget.getTargetMachine(); PassManager NVPTXPassManager; NVPTXPassManager.add(new TargetPass(NVPTXTargetMachine)); NVPTXPassManager.add(new RegisterAllocationPass(NVPTXTargetMachine)); NVPTXPassManager.run(F); }" LLVM,NVPTX,367,"void addRegAssignAndRewriteFast(Function &F, TargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const std::string &ArchName = TT.getArchName(); if (ArchName == ""nvptx"") { addRegAssignAndRewrite(F, TM); } else { } }" LLVM,NVPTX,368,"static PassConfig createPassConfig(const NVPTXTargetMachine &TM) { PassConfig config; config.addPass(new NVPTXTargetMachinePass(TM)); config.addPass(new NVPTXCodeGenPass(TM)); config.addPass(new NVPTXOptimizationPass(TM)); return config; } ``` This function creates a `PassConfig` object and adds the NVPTX target-specific passes to it. 
The `NVPTXTargetMachinePass` pass is responsible for setting up the target machine for the NVPTX backend, while the `NVPTXCodeGenPass` pass generates the machine code for the NVPTX backend. The `NVPTXOptimizationPass` pass performs optimization on the generated machine code. You can use this function to create a pass configuration object that can be used by the `addPassToEmitX` methods to generate a pipeline of CodeGen passes for the NVPTX backend. For example: ``` int main() { NVPTXTargetMachine TM; PassConfig config = createPassConfig(TM); PassManager PM; PM.addPass(new NVPTXTargetMachinePass(TM)); PM.addPass(new NVPTXCodeGenPass(TM)); PM.addPass(new NVPTXOptimizationPass(TM)); PM.run(TM); return 0; }" LLVM,NVPTX,369,TargetTransformInfo *getTargetTransformInfo(const Target &T) { if (T.getTargetID() == NVPTXTargetID) { return new NVPTXTargetTransformInfo(); } return nullptr; } LLVM,NVPTX,370,"void registerPassBuilderCallbacks(PassBuilder &PB) { PB.registerCallback(""nvptx"", ""nvptx"", ""nvvm-reflect"", ""nvvm-intr-range""); }" LLVM,NVPTX,371, LLVM,NVPTX,372,"void addInstSelector(LLVMContext& Context, StringRef Target) { Context.addInstSelector(Target, new NVPTXInstSelector()); } class NVPTXInstSelector : public InstSelector { public: NVPTXInstSelector() : InstSelector(Target) {} void addInstruction(Instruction* I) { } void addInstruction(Instruction* I, const char* Name) { } void addInstruction(Instruction* I, const char* Name, const char* Opcode) { } void addInstruction(Instruction* I, const char* Name, const char* Opcode, const char* Operand) { } void addInstruction(Instruction* I, const char* Name, const char* Opcode, const char* Operand, const char* Operand2) { } void addInstruction(Instruction* I, const char* Name, const char* Opcode, const char* Operand, const char* Operand2, const char* Operand3) { } void addInstruction(Instruction* I, const char* Name, const char* Opcode, const char* Operand, const char* Operand2, const char* Operand3, const char* Operand4) { }" LLVM,NVPTX,373, LLVM,NVPTX,374, LLVM,NVPTX,375,"void addOptimizedRegAlloc(LLVMContext &Context, Module &M) { TargetSpecificValue *NVPTX = Context.getTargetSpecificValue(M, ""NVPTX""); addPass(Context, M, ""NVPTX"", ""NVPTX uses no regalloc!"", ""After Machine Scheduling"", ""After StackSlotColoring""); }" LLVM,NVPTX,376,"void addPostRegAlloc(LLVMContext &Context, Function &F) { const char *NVPTX = ""NVPTX""; RegisterAllocationPass *RAP = F.getRegisterAllocationPass(); if (!RAP) { return; } ProloguePass *PP = F.getProloguePass(); EpiloguePass *EP = F.getEpiloguePass(); if (!PP || !EP) { return; } const char *NVPTX = ""NVPTX""; FunctionPass *NewPass = new FunctionPass(Context, NVPTX); F.addPass(NewPass); F.setCurrentPass(NewPass); NewPass->runOnFunction(F); F.removePass(NewPass); }" LLVM,NVPTX,377,PassConfig createPassConfig(const TargetMachine &TM) { PassConfig config; config.addPass(new NVPTXTargetPass()); config.addPass(new NVPTXCodeGenPass()); config.addPass(new NVPTXOptimizationPass()); config.addPass(new NVPTXLoweringPass()); return config; } LLVM,NVPTX,378,static RegisterAllocator *createTargetRegisterAllocator(const TargetMachine *TM) { return new NVPTXRegisterAllocator(TM); } LLVM,NVPTX,379,"TargetIRAnalysis *getTargetIRAnalysis(const Target &T) { if (T.getTriple().getArch() == Triple::nvptx) { return new NVPTXTargetIRAnalysis(); } return nullptr; } ``` This function takes a `Target` object as an argument and returns a `TargetIRAnalysis` object if the target is NVPTX, and `nullptr` otherwise. 
The `NVPTXTargetIRAnalysis` class is a subclass of `TargetIRAnalysis` that provides target-specific analysis for NVPTX. You can use this function to get a `TargetIRAnalysis` object for the NVPTX target and use it to perform target-specific analysis on the LLVM IR. Here is an example of how you can use this function: ``` #include ""llvm/IR/Module.h"" #include ""llvm/IR/TargetIRAnalysis.h"" #include ""llvm/IR/NVPTX/NVPTXTargetIRAnalysis.h"" int main() { Module M(""my_module""); TargetIRAnalysis *TIA = getTargetIRAnalysis(M.getTarget()); return 0; }" LLVM,NVPTX,380,"void addInstSelector(LLVMContext &Context, StringRef Target) { auto *Selector = new InstructionSelector(Context, Target); Selector->addTargetSpecificValue(""NVPTX"", ""NVPTX"", ""NVPTX"", ""NVPTX"", ""NVPTX""); Context.addInstructionSelector(Selector); }" LLVM,NVPTX,381,static const TargetLowering *getObjFileLowering(const TargetMachine &TM) { const NVPTXTargetMachine *NVPTXTM = static_cast(&TM); const NVPTXTargetLowering *NVPTXTL = static_cast(NVPTXTM->getTargetLowering()); return NVPTXTL; } LLVM,NVPTX,382,class NVPTXSubtargetInfo : public TargetSubtargetInfo { public: NVPTXSubtargetInfo(const NVPTXSubtarget &ST) : TargetSubtargetInfo(ST) {} const NVPTXSubtarget &getSubtargetImpl() const { return static_cast(*this); } LLVM,NVPTX,383,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createTargetTransformInfoWrapperPass(TargetTransformInfo(Context))); PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass()); PM.add(createLoopUnswitchPass()); PM.add(createMem2RegPass()); PM.add(createDemoteMemoryToRegisterPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createAggressiveDCEPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass()); PM.add(createLoopUnswitchPass()); PM.add(createMem2RegPass()); PM.add(createDemoteMemoryToRegisterPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createAggressiveDCEPass()); }" LLVM,NVPTX,384,"void addPostRegAlloc(LLVMContext &Context, Function &F) { const char *NVPTX = ""NVPTX""; RegisterAllocationPass *RAP = F.getRegisterAllocationPass(); if (!RAP) { return; } ProloguePass *PP = F.getProloguePass(); EpiloguePass *EP = F.getEpiloguePass(); if (!PP || !EP) { return; } const char *NVPTX = ""NVPTX""; FunctionPass *NewPass = new FunctionPass(Context, NVPTX); F.addPass(NewPass); F.setCurrentPass(NewPass); NewPass->runOnFunction(F); F.removePass(NewPass); }" LLVM,NVPTX,385,"void addInstSelector(LLVMContext& Context, StringRef Target) { Triple TargetTriple(Target); InstructionSelectorPass* InstSelPass = new InstructionSelectorPass(); InstSelPass->setTargetTriple(TargetTriple); Context.addInstructionSelectorPass(InstSelPass); }" LLVM,NVPTX,386,void addPassesToEmitMC(PassManager &PM) { PM.add(createNVPTXMCCodeEmitterPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); } LLVM,NVPTX,387,DataLayout getDataLayout(Module &M) { return M.getDataLayout(); } LLVM,NVPTX,388,TargetInstrInfo *getInstrInfo(const Target &T) { if (T.getTargetID() == Target::NVPTX) { return new NVPTXInstrInfo(); } 
else { return nullptr; } } LLVM,NVPTX,389,"Value *getPredicatedAddrSpace(Value *Ptr, unsigned AddrSpace) { if (Intrinsic::nvvm_isspacep_const(Ptr, AddrSpace)) { return Ptr; } else if (Intrinsic::nvvm_isspacep_global(Ptr, AddrSpace)) { return Ptr; } else if (Intrinsic::nvvm_isspacep_local(Ptr, AddrSpace)) { return Ptr; } else if (Intrinsic::nvvm_isspacep_shared(Ptr, AddrSpace)) { return Ptr; } else { return nullptr; } }" LLVM,NVPTX,390,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,391,class NVPTXSubtargetInfo : public TargetSubtargetInfo { public: NVPTXSubtargetInfo(const NVPTXSubtarget &ST) : TargetSubtargetInfo(ST) {} const NVPTXSubtarget &getSubtargetImpl() const { return static_cast(*this); } LLVM,NVPTX,392,void addPassesToEmitMC(PassManager &PM) { PM.add(createNVPTXMCCodeEmitterPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); } LLVM,NVPTX,393,"static PassConfig createPassConfig(const NVPTXTargetMachine &TM) { PassConfig config; config.addPass(new NVPTXTargetMachinePass(TM)); config.addPass(new NVPTXCodeGenPass(TM)); config.addPass(new NVPTXOptimizationPass(TM)); return config; } ``` This function creates a `PassConfig` object and adds the NVPTX target-specific passes to it. The `NVPTXTargetMachinePass` pass is responsible for setting up the target machine for the NVPTX backend, while the `NVPTXCodeGenPass` pass generates the machine code for the NVPTX backend. The `NVPTXOptimizationPass` pass performs optimization on the generated machine code. You can use this function to create a pass configuration object that can be used by the `addPassToEmitX` methods to generate a pipeline of CodeGen passes for the NVPTX backend. For example: ``` int main() { NVPTXTargetMachine TM; PassConfig config = createPassConfig(TM); PassManager PM; PM.addPass(new NVPTXTargetMachinePass(TM)); PM.addPass(new NVPTXCodeGenPass(TM)); PM.addPass(new NVPTXOptimizationPass(TM)); PM.run(TM); return 0; }" LLVM,NVPTX,394, LLVM,NVPTX,395,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createNVPTXInstructionSelectionPass()); PM.add(createNVPTXRegisterAllocationPass()); PM.add(createNVPTXCodeGenerationPass()); }" LLVM,NVPTX,396,"void addPostRegAlloc(LLVMContext &Context, Function &F) { const Target &NVPTX = Context.getTarget(); PassManager PM; PM.add(new RegAllocPass()); PM.add(new NVPTXRegAllocPass()); PM.run(F); }" LLVM,NVPTX,397,"TargetIRAnalysis *getTargetIRAnalysis(const Target &T) { if (T.getTriple().getArch() == Triple::nvptx) { return new NVPTXTargetIRAnalysis(); } return nullptr; } ``` This function takes a `Target` object as an argument and returns a `TargetIRAnalysis` object if the target is NVPTX, and `nullptr` otherwise. The `NVPTXTargetIRAnalysis` class is a subclass of `TargetIRAnalysis` that provides target-specific analysis for NVPTX. You can use this function to get a `TargetIRAnalysis` object for the NVPTX target and use it to perform target-specific analysis on the LLVM IR. 
Here is an example of how you can use this function: ``` #include ""llvm/IR/Module.h"" #include ""llvm/IR/TargetIRAnalysis.h"" #include ""llvm/IR/NVPTX/NVPTXTargetIRAnalysis.h"" int main() { Module M(""my_module""); TargetIRAnalysis *TIA = getTargetIRAnalysis(M.getTarget()); return 0; }" LLVM,NVPTX,398,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,399,"void addInstSelector(LLVMContext &Context, StringRef Target) { Context.addInstSelector(Target, new NVPTXInstSelector()); }" LLVM,NVPTX,400,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createNVPTXInstructionSelectionPass()); PM.add(createNVPTXRegisterAllocationPass()); PM.add(createNVPTXCodeGenerationPass()); }" LLVM,NVPTX,401,"void addPostRegAlloc(LLVMContext &Context, Function &F) { const Target &NVPTX = Context.getTarget(); PassManager PM; PM.add(new RegAllocPass()); PM.add(new NVPTXRegAllocPass()); PM.run(F); }" LLVM,NVPTX,402,"void addPreRegAlloc(LLVMContext &Context, NVPTXTargetMachine &Target) { Context.addPass(new NVPTXPreRegAllocPass(Target)); }" LLVM,NVPTX,403,"void registerPassBuilderCallbacks(PassBuilder &PB) { PB.registerCallback(""nvptx"", ""nvptx"", ""nvvm-reflect"", ""nvvm-intr-range""); }" LLVM,NVPTX,404, LLVM,NVPTX,405,"void addRegAssignmentFast(LLVMContext &Context, Function &F) { const TargetMachine *TM = Context.getTargetMachine(); RegisterAllocator &RA = Context.getRegisterAllocator(); BasicBlock &EntryBlock = F.getEntryBlock(); Type *ReturnType = F.getReturnType(); BasicBlock *AssignBlock = BasicBlock::Create(Context, ""reg_assign"", &F); BasicBlock *ReturnBlock = BasicBlock::Create(Context, ""return"", &F); Instruction *AssignInst = new Instruction(Context, ""assign"", ReturnType); AssignInst->setOperand(0, ReturnType); AssignBlock->addInstruction(AssignInst); F.addBasicBlock(AssignBlock); F.addBasicBlock(ReturnBlock); F.setEntryBlock(EntryBlock); F.setReturnBlock(ReturnBlock); F.addPass(new RegisterAssignmentPass(Context, RA)); F.addPass(new RegisterRewritingPass(Context, RA)); }" LLVM,NVPTX,406,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createNVPTXInstructionSelectionPass()); PM.add(createNVPTXRegisterAllocationPass()); PM.add(createNVPTXCodeGenerationPass()); }" LLVM,NVPTX,407, LLVM,NVPTX,408,TargetTransformInfo *getTargetTransformInfo(const Target &T) { if (T.getTargetID() == NVPTXTargetID) { return new NVPTXTargetTransformInfo(); } return nullptr; } LLVM,NVPTX,409, LLVM,NVPTX,410,"TargetIRAnalysis *getTargetIRAnalysis(const Target &T) { if (T.getTriple().getArch() == Triple::nvptx) { return new NVPTXTargetIRAnalysis(); } return nullptr; } ``` This function takes a `Target` object as an argument and returns a `TargetIRAnalysis` object if the target is NVPTX, and `nullptr` otherwise. The `NVPTXTargetIRAnalysis` class is a subclass of `TargetIRAnalysis` that provides target-specific analysis for NVPTX. 
You can use this function to get a `TargetIRAnalysis` object for the NVPTX target and use it to perform target-specific analysis on the LLVM IR. Here is an example of how you can use this function: ``` #include ""llvm/IR/Module.h"" #include ""llvm/IR/TargetIRAnalysis.h"" #include ""llvm/IR/NVPTX/NVPTXTargetIRAnalysis.h"" int main() { Module M(""my_module""); TargetIRAnalysis *TIA = getTargetIRAnalysis(M.getTarget()); return 0; }" LLVM,NVPTX,411,"void registerPassBuilderCallbacks(PassBuilder &PB) { PB.registerCallback(""nvptx"", ""nvptx"", ""nvvm-reflect"", ""nvvm-intr-range""); }" LLVM,NVPTX,412, LLVM,NVPTX,413, LLVM,NVPTX,414,"void addOptimizedRegAlloc(LLVMContext &Context, PassManager &PM) { return; }" LLVM,NVPTX,415,static RegisterAllocator *createTargetRegisterAllocator(const TargetMachine *TM) { return new NVPTXRegisterAllocator(TM); } LLVM,NVPTX,416, LLVM,NVPTX,417,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createTargetTransformInfoWrapperPass(TargetTransformInfo(Context))); PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass()); PM.add(createLoopUnswitchPass()); PM.add(createMem2RegPass()); PM.add(createDemoteMemoryToRegisterPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createAggressiveDCEPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass()); PM.add(createLoopUnswitchPass()); PM.add(createMem2RegPass()); PM.add(createDemoteMemoryToRegisterPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createAggressiveDCEPass()); }" LLVM,NVPTX,418, LLVM,NVPTX,419, LLVM,NVPTX,420, LLVM,NVPTX,421,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createTargetTransformInfoWrapperPass(TargetTransformInfo(Context))); PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass()); PM.add(createLoopUnswitchPass()); PM.add(createMem2RegPass()); PM.add(createDemoteMemoryToRegisterPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createAggressiveDCEPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotatePass()); PM.add(createLoopUnrollPass()); PM.add(createLoopUnswitchPass()); PM.add(createMem2RegPass()); PM.add(createDemoteMemoryToRegisterPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createAggressiveDCEPass()); }" LLVM,NVPTX,422,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopSimplificationPass()); PM.add(createLoopUnrollingPass()); PM.add(createLoopVectorizationPass()); PM.add(createInstructionSelectionPass()); PM.add(createTargetTransformInfoWrapperPass(TargetTransformInfo())); PM.add(createNVPTXInstructionCombiningPass()); 
PM.add(createNVPTXReassociatePass()); PM.add(createNVPTXGVNPass()); PM.add(createNVPTXDeadStoreEliminationPass()); PM.add(createNVPTXCFGSimplificationPass()); PM.add(createNVPTXLoopSimplificationPass()); PM.add(createNVPTXLoopUnrollingPass()); PM.add(createNVPTXLoopVectorizationPass()); PM.add(createNVPTXInstructionSelectionPass()); }" LLVM,NVPTX,423,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createInstructionCombiningPass()); PM.add(createReassociatePass()); PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopRotationPass()); PM.add(createLoopUnswitchingPass()); PM.add(createLoopVectorizationPass()); PM.add(createSLPVectorizationPass()); PM.add(createInstructionSelectionPass()); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); PM.add(createTargetTransformInfoWrapperPass(NVPTX)); }" LLVM,NVPTX,424,"void addIRPasses(LLVMContext &Context, PassManager &PM) { PM.add(createTargetTransformInfoWrapperPass(Context, TargetTransformInfo::NVPTX)); PM.add(createTargetTransformInfoWrapperPass(Context, TargetTransformInfo::NVPTX)); PM.add(createTargetTransformInfoWrapperPass(Context, TargetTransformInfo::NVPTX)); PM.add(createTargetTransformInfoWrapperPass(Context, TargetTransformInfo::NVPTX)); }" LLVM,NVPTX,425, LLVM,NVPTX,426,void addPassesToEmitMC(PassManager &PM) { PM.add(createNVPTXMCCodeEmitterPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); PM.add(createNVPTXMCCodeEmitterLegacyPass()); } LLVM,NVPTX,427,DataLayout getDataLayout(Module &M) { return M.getDataLayout(); } LLVM,NVPTX,428,TargetInstrInfo *getInstrInfo(const Target &T) { if (T.getTargetID() == Target::NVPTX) { return new NVPTXInstrInfo(); } else { return nullptr; } } LLVM,NVPTX,429,static const TargetRegisterInfo *getRegisterInfo(const NVPTXTargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); if (TT.getArch() == Triple::NVPTX) { return TRI; } return nullptr; } LLVM,NVPTX,430,class NVPTXSubtargetInfo : public TargetSubtargetInfo { public: NVPTXSubtargetInfo(const NVPTXSubtarget &ST) : TargetSubtargetInfo(ST) {} const NVPTXSubtarget &getSubtargetImpl() const { return static_cast(*this); } LLVM,NVPTX,431,"const char *getExplicitSectionGlobal(const GlobalValue *GV) { if (GV->hasSection()) { const char *section = GV->getSection(); if (section == ""nvptx"") { return ""nvptx""; } } return nullptr; }" LLVM,NVPTX,432,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,433, LLVM,NVPTX,434,"const char *getExplicitSectionGlobal(const GlobalValue *GV) 
{ if (GV->hasSection()) { const char *section = GV->getSection(); if (section == ""nvptx"") { return ""nvptx""; } } return nullptr; }" LLVM,NVPTX,435,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,436, LLVM,NVPTX,437,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,438, LLVM,NVPTX,439,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,440,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,441, LLVM,NVPTX,442,"const char *getExplicitSectionGlobal(const GlobalValue *GV) { if (GV->hasSection()) { const char *section = GV->getSection(); if (section == ""nvptx"") { return ""nvptx""; } } return nullptr; }" LLVM,NVPTX,443,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,444, LLVM,NVPTX,445, LLVM,NVPTX,446,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if 
(STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,447,"const char *getExplicitSectionGlobal(const GlobalValue *GV) { if (GV->hasSection()) { const char *section = GV->getSection(); if (section == ""nvptx"") { return ""nvptx""; } } return nullptr; }" LLVM,NVPTX,448,"StringRef getSectionForConstant(const Constant *C, SectionKind Kind) { const Module *M = C->getParent(); const Triple &TT = M->getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return StringRef(); } const Type *Ty = C->getType(); if (Ty->isPointerTy()) { const Type *PointeeTy = Ty->getPointerElementType(); if (PointeeTy->isStructTy()) { const StructType *STy = cast(PointeeTy); if (STy->hasSection()) { StringRef SectionName = STy->getSection(); if (SectionName.startswith("".nvptx"")) { return SectionName; } } } }" LLVM,NVPTX,449, LLVM,NVPTX,450,"void changeSection(LLVMContext &Context, Section &NewSection) { Section *CurrentSection = Context.getCurrentSection(); Context.setCurrentSection(NewSection); if (NewSection) { Context.setStreamer(NewSection.getStreamer()); } if (CurrentSection) { Context.setStreamer(CurrentSection.getStreamer()); } }" LLVM,NVPTX,451, unsigned int flags) { DwarfFileDirective directive; directive.filename = filename; directive.directory = directory; directive.producer = producer; directive.version = version; directive.flags = flags; getCurrentCompilationUnit()->addDirective(directive); } LLVM,NVPTX,452,"void emitRawBytes(uint8_t *data, size_t size) { for (size_t i = 0; i < size; i++) { emitByte(data[i]); } }" LLVM,NVPTX,453,"void outputDwarfFileDirectives(const Module &M, raw_ostream &OS) { const DWARFFileTable *FileTable = M.getDWARFFileTable(); for (const DWARFFile *File : FileTable->getFiles()) { OS << "".file "" << File->getFilename() << ""\n""; } }" LLVM,NVPTX,454,"void changeSection(LLVMContext &Context, Section &NewSection) { Section *CurrentSection = Context.getCurrentSection(); Context.setCurrentSection(NewSection); if (NewSection) { Context.setStreamer(NewSection.getStreamer()); } if (CurrentSection) { Context.setStreamer(CurrentSection.getStreamer()); } }" LLVM,NVPTX,455,"void changeSection(LLVMContext &Context, const std::string &SectionName) { const MCSection *CurrentSection = Context.getCurrentSection(); if (CurrentSection != nullptr) { const std::string &CurrentSectionName = CurrentSection->getName(); if (CurrentSectionName != SectionName) { Context.setCurrentSection(SectionName); } } }" LLVM,NVPTX,456,"void closeLastSection(LLVMContext &Context, NVPTXTargetMachine &Target) { const MCSection *LastSection = Target.getLastSection(); Target.closeSection(LastSection); Target.updateSectionList(LastSection, nullptr); }" LLVM,NVPTX,457,"void changeSection(LLVMContext &Context, const std::string &SectionName) { const MCSection *CurrentSection = Context.getCurrentSection(); if (CurrentSection != nullptr) { const std::string &CurrentSectionName = CurrentSection->getName(); if (CurrentSectionName != SectionName) { Context.setCurrentSection(SectionName); } } }" LLVM,NVPTX,458,"void outputDwarfFileDirectives(const Module &M, raw_ostream &OS) { const DWARFFileTable *FileTable = M.getDWARFFileTable(); for (const DWARFFile *File : FileTable->getFiles()) { OS << "".file "" << File->getFilename() << ""\n""; } }" LLVM,NVPTX,459,"void emitRawBytes(uint8_t *data, size_t size) { for (size_t i = 0; i < size; i++) { emitByte(data[i]); } }" LLVM,NVPTX,460,"void changeSection(LLVMContext &Context, const std::string 
&SectionName) { const MCSection *CurrentSection = Context.getCurrentSection(); if (CurrentSection != nullptr) { const std::string &CurrentSectionName = CurrentSection->getName(); if (CurrentSectionName != SectionName) { Context.setCurrentSection(SectionName); } } }" LLVM,NVPTX,461,"void closeLastSection(LLVMContext &Context, NVPTXTargetMachine &Target) { const MCSection *LastSection = Target.getLastSection(); Target.closeSection(LastSection); Target.updateSectionList(LastSection, nullptr); }" LLVM,NVPTX,462,"void emitRawBytes(uint8_t *data, size_t size) { for (size_t i = 0; i < size; i++) { emitByte(data[i]); } }" LLVM,NVPTX,463,"void outputDwarfFileDirectives(const Module &M, raw_ostream &OS) { const DWARFFileTable *FileTable = M.getDWARFFileTable(); for (const DWARFFile *File : FileTable->getFiles()) { OS << "".file "" << File->getFilename() << ""\n""; } }" LLVM,NVPTX,464,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,465,"TargetLowering *getTLI(const NVPTXTargetMachine &TM) { TargetLowering *TLI = new TargetLowering(TM); TLI->setTargetTriple(""nvptx-nvidia-cuda""); TLI->setTargetArch(""nvptx""); TLI->setTargetVendor(""nvidia""); return TLI; } ``` This function takes a `NVPTXTargetMachine` object as an argument, which is used to create a new `TargetLowering` object for the NVPTX backend. The `TargetLowering` object is then configured with the target specific values for the NVPTX backend, such as the target triple, architecture, and vendor. You can use this function in your LLVM code to get a `TargetLowering` object for the NVPTX backend, like this: ``` int main() { NVPTXTargetMachine TM; TargetLowering *TLI = getTLI(TM); return 0; }" LLVM,NVPTX,466,"static const char *getUnrollingPreferences(const Triple &Triple) { if (Triple.getArch() == Triple::nvptx) { return ""NVPTX,NVPTX,4""; } return """"; } ``` This function takes a `Triple` object as an argument, which represents the target architecture and vendor. If the target architecture is `nvptx`, the function returns the string `""NVPTX,NVPTX,4""`, which indicates that the generic loop unrolling transformation should be applied with a factor of 4. If the target architecture is not `nvptx`, the function returns an empty string. You can use this function in your LLVM Compiler pass to get the target-customized preferences for the generic loop unrolling transformation. 
For example: ``` #include ""llvm/ADT/StringRef.h"" #include ""llvm/ADT/Triple.h"" #include ""llvm/Support/TargetSelect.h"" using namespace llvm; static const char *getUnrollingPreferences(const Triple &Triple) { if (Triple.getArch() == Triple::nvptx) { return ""NVPTX,NVPTX,4""; } return """"; } const char *unrollingPreferences = getUnrollingPreferences(Triple); if (unrollingPreferences != """") { LoopUnrollPass(unrollingPreferences).runOnFunction(*F); }" LLVM,NVPTX,467,bool hasBranchDivergence(const Function &F) { if (!F.hasBranches()) return false; for (const BasicBlock &BB : F.getBasicBlocks()) { for (const Instruction &I : BB.getInstructions()) { if (I.isBranch()) { const BasicBlock *TargetBB = I.getSuccessor(0); if (TargetBB->isDivergent()) return true; } } } return false; } LLVM,NVPTX,468,bool isSourceOfDivergence(Value *V) { if (V->getType()->isPointerTy()) { return true; } else if (V->getType()->isIntegerTy()) { return !V->getType()->isConstant(); } else if (V->getType()->isFloatTy()) { return !V->getType()->isConstant(); } else if (V->getType()->isDoubleTy()) { return !V->getType()->isConstant(); } else if (V->getType()->isVectorTy()) { return !V->getType()->isConstant(); } else if (V->getType()->isArrayTy()) { return !V->getType()->isConstant(); } else if (V->getType()->isStructTy()) { return !V->getType()->isConstant(); } else { return false; } } LLVM,NVPTX,469,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,470, LLVM,NVPTX,471,"static const char *getUnrollingPreferences(const Triple &Triple) { if (Triple.getArch() == Triple::nvptx) { return ""NVPTX,NVPTX,4""; } return """"; } ``` This function takes a `Triple` object as an argument, which represents the target architecture and vendor. If the target architecture is `nvptx`, the function returns the string `""NVPTX,NVPTX,4""`, which indicates that the generic loop unrolling transformation should be applied with a factor of 4. If the target architecture is not `nvptx`, the function returns an empty string. You can use this function in your LLVM Compiler pass to get the target-customized preferences for the generic loop unrolling transformation. 
For example: ``` #include ""llvm/ADT/StringRef.h"" #include ""llvm/ADT/Triple.h"" #include ""llvm/Support/TargetSelect.h"" using namespace llvm; static const char *getUnrollingPreferences(const Triple &Triple) { if (Triple.getArch() == Triple::nvptx) { return ""NVPTX,NVPTX,4""; } return """"; } const char *unrollingPreferences = getUnrollingPreferences(Triple); if (unrollingPreferences != """") { LoopUnrollPass(unrollingPreferences).runOnFunction(*F); }" LLVM,NVPTX,472," const TargetIntrinsicInfo &TII) { if (Inst->getOpcode() == Instruction::TargetIntrinsic) { unsigned IntrinsicID = Inst->getTargetIntrinsicID(); if (IntrinsicID == NVPTXIntrinsic::nvptx_intrinsic) { StringRef IntrinsicName = TII.getIntrinsicName(IntrinsicID); if (IntrinsicName == ""nvptx_combine_intrinsic"") { OperandBundleDef *OperandBundle = Inst->getOperandBundle(0); OperandBundleDef *OperandBundle2 = Inst->getOperandBundle(1); Instruction *NewInst = Instruction::Create(Inst->getOpcode(), Inst->getType(), OperandBundle->getOperand(0), OperandBundle2->getOperand(0)); Inst->replaceAllUsesWith(NewInst); Inst->eraseFromParent(); } } } }" LLVM,NVPTX,473,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,474,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,475,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,476,bool canHaveNonUndefGlobalInitializerInAddressSpace(const Target &target) { if (target.getTriple().getArch() != Triple::nvptx) { return false; } if (!target.hasFeature(Target::NVPTX::NonUndefGlobalInitializer)) { return false; } return true; } LLVM,NVPTX,477,"void getAdjustedAnalysisPointer(Pass *P, AnalysisID ID) { if (ID == NVPTXAnalysisID) { return &P->getAnalysis(); } return nullptr; }" LLVM,NVPTX,478,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,479,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,480,bool hasBranchDivergence(const Function &F) { if (F.getBasicBlockList().empty()) return false; for (auto &BB : F.getBasicBlockList()) { if (BB.getTerminator()->getOpcode() == Instruction::Br) { auto *Br = cast(BB.getTerminator()); if (Br->isDivergent()) return true; } } return false; } LLVM,NVPTX,481,"void initializePass(LLVMPassManagerRef PM, LLVMTargetMachineRef TM) { LLVMSetTarget(TM, ""nvptx""); LLVMSetTargetTriple(TM, ""nvptx-unknown-unknown""); LLVMSetTargetData(TM, LLVMCreateTargetData(""nvptx"")); LLVMSetPassName(PM, ""nvptx-optimize""); LLVMSetPassDebugLogging(PM, true); 
LLVMSetPassTiming(PM, true); }" LLVM,NVPTX,482,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,483, const TargetLibraryInfo &CallerTLI) { if (CalleeTLI.hasAttribute(Attribute::NoBuiltin) != CallerTLI.hasAttribute(Attribute::NoBuiltin)) { return false; } if (CalleeTLI.hasAttribute(Attribute::NVPTX) != CallerTLI.hasAttribute(Attribute::NVPTX)) { return false; } if (CalleeTLI.hasAttribute(Attribute::NVPTXVersion) != CallerTLI.hasAttribute(Attribute::NVPTXVersion)) { return false; } if (CalleeTLI.hasAttribute(Attribute::NVPTXArch) != CallerTLI.hasAttribute(Attribute::NVPTXArch)) { return false; } if (CalleeTLI.hasAttribute(Attribute::NVPTXFeatures) != CallerTLI.hasAttribute(Attribute::NVPTXFeatures)) { return false; } if (CalleeTLI.hasAttribute(Attribute::NVPTXExtensions) != CallerTLI.hasAttribute(Attribute::NVPTXExtensions)) { return false; } LLVM,NVPTX,484,float getArithmeticInstrCost(Instruction *I) { float cost = 0.0f; if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Xor || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { cost = 1.0f / (float)I->getNumOperands(); } return cost; } LLVM,NVPTX,485,unsigned getFlatAddressSpace(LLVMContext &Context) { const Target &NVPTXTarget = Context.getTarget(); unsigned FlatAddressSpaceID = NVPTXTarget.getAddressSpaceID(NVPTXTarget.getFlatAddressSpace()); return FlatAddressSpaceID; } LLVM,NVPTX,486,int getNumberOfRegisters(LLVMContext &Context) { int NVPTX = Context.getTargetSpecificValue(NVPTX_1); int NumRegisters = Context.getTargetInfo().getNumberOfRegisters(NVPTX); return NumRegisters; } LLVM,NVPTX,487,"static const char *getUnrollingPreferences(const Triple &Triple) { if (Triple.getArch() == Triple::nvptx) { return ""NVPTX,NVPTX,4""; } return """"; } ``` This function takes a `Triple` object as an argument, which represents the target architecture and vendor. If the target architecture is `nvptx`, the function returns the string `""NVPTX,NVPTX,4""`, which indicates that the generic loop unrolling transformation should be applied with a factor of 4. If the target architecture is not `nvptx`, the function returns an empty string. You can use this function in your LLVM Compiler pass to get the target-customized preferences for the generic loop unrolling transformation. 
For example: ``` #include ""llvm/ADT/StringRef.h"" #include ""llvm/ADT/Triple.h"" #include ""llvm/Support/TargetSelect.h"" using namespace llvm; static const char *getUnrollingPreferences(const Triple &Triple) { if (Triple.getArch() == Triple::nvptx) { return ""NVPTX,NVPTX,4""; } return """"; } const char *unrollingPreferences = getUnrollingPreferences(Triple); if (unrollingPreferences != """") { LoopUnrollPass(unrollingPreferences).runOnFunction(*F); }" LLVM,NVPTX,488,"bool hasVolatileVariant(Instruction *I) { if (!I->isMemoryAccess()) return false; if (I->getMetadata(""volatile"")) return true; if (I->getOpcode() == Instruction::Load || I->getOpcode() == Instruction::Store) { if (I->getMetadata(""volatile"")) return true; } return false; }" LLVM,NVPTX,489,"unsigned int getAlign(LLVMContext &Context, const Value *V) { const TargetSpecificValue *TSValue = Context.getTargetSpecificValue(V); if (!TSValue) return 0; if (TSValue->getTarget() != NVPTX) return 0; const unsigned int CallAlign = TSValue->getCallAlign(); if (CallAlign == 0) return 0; return CallAlign; }" LLVM,NVPTX,490,"unsigned int getAlign(LLVMContext &Context, const Value *V) { const TargetSpecificValue *TSValue = Context.getTargetSpecificValue(V); if (!TSValue) return 0; if (TSValue->getTarget() != NVPTX) return 0; const unsigned int CallAlign = TSValue->getCallAlign(); if (CallAlign == 0) return 0; return CallAlign; }" LLVM,NVPTX,491,Instruction *getInst(Instruction *I) { if (I->getOpcode() == Instruction::NVPTX) { return I->getOperand(0); } return nullptr; } LLVM,NVPTX,492,"unsigned getAlign(unsigned align, unsigned size) { if (align == 0) { align = 16; } if (size % align != 0) { size = (size / align) * align + align; } return size; }" LLVM,NVPTX,493,"unsigned int getAlign(LLVMContext &Context, const Value *V) { const TargetSpecificValue *TSValue = Context.getTargetSpecificValue(V); if (!TSValue) return 0; if (TSValue->getTarget() != NVPTX) return 0; const unsigned int CallAlign = TSValue->getCallAlign(); if (CallAlign == 0) return 0; return CallAlign; }" LLVM,NVPTX,494,"unsigned int getAlign(LLVMContext &Context, const Value *V) { const TargetSpecificValue *TSValue = Context.getTargetSpecificValue(V); if (!TSValue) return 0; if (TSValue->getTarget() != NVPTX) return 0; const unsigned int CallAlign = TSValue->getCallAlign(); if (CallAlign == 0) return 0; return CallAlign; }" LLVM,NVPTX,495,"unsigned int getAlign(LLVMContext &Context, const Value *V) { const TargetSpecificValue *TSValue = Context.getTargetSpecificValue(V); if (!TSValue) return 0; if (TSValue->getTarget() != NVPTX) return 0; const unsigned int CallAlign = TSValue->getCallAlign(); if (CallAlign == 0) return 0; return CallAlign; }" LLVM,NVPTX,496,Instruction *getInst(Instruction *I) { if (I->getOpcode() == Instruction::Call && I->getCalledFunction()->getIntrinsicID() == Intrinsic::nvptx_0) { return I->getOperand(0); } return nullptr; } LLVM,NVPTX,497,"void run(Function &F, DominatorTree &DT) { const Target &NVPTXTarget = F.getParent()->getTarget(); DT.create(F); for (BasicBlock &BB : F) { Instruction *Term = BB.getTerminator(); if (Term->getOpcode() == Instruction::Br) { DT.add(Term, BB); } } DT.compute(); }" LLVM,NVPTX,498,"class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { return false; } }; char NVPTXPass::ID = 0; static RegisterPass X(""nvptx-pass"", ""NVPTX Pass""); ``` This code defines a new pass called `NVPTXPass` that inherits from the `FunctionPass` class. 
The `runOnFunction` method is overridden to do the per-function processing of the pass. The `runOnFunction` method takes a `Function` object as an argument, which represents the current function being processed by the pass. The method returns a `bool` value indicating whether the pass should continue processing the function. In this example, the `runOnFunction` method does not do anything, but you can add your own code to perform the per-function processing of the pass. You can then use the `NVPTXPass` class in your LLVM code by creating an instance of the class and calling the `runOnFunction` method on it. For example: ``` int main() { NVPTXPass pass; Function *F = new Function(""my_function"", Type::getVoidTy(getGlobalContext())); pass.runOnFunction(*F); return 0; }" LLVM,NVPTX,499, LLVM,NVPTX,500,"class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { Module *M = F.getParent(); Value *TargetSpecificValue = M->getOrInsertGlobal(""nvvm_read_ptx_sreg_tid_x"", Type::getInt32Ty(M->getContext())); IntrinsicInst *Intrinsic = IntrinsicInst::Create(Intrinsic::nvvm_read_ptx_sreg_tid_x, TargetSpecificValue, F.getEntryBlock().getFirstNonPHI()); Intrinsic->setTargetSpecificValue(TargetSpecificValue); F.getEntryBlock().getInstList().push_back(Intrinsic); return true; }" LLVM,NVPTX,501,"void runOnFunction(Function &F) { if (!F.hasAttribute(Attribute::NVPTX)) return; Attribute::NVPTX nvptx = F.getAttribute(Attribute::NVPTX); if (!nvptx.hasAttribute(Attribute::_reflect)) return; Attribute::_reflect reflect = nvptx.getAttribute(Attribute::_reflect); if (reflect.getReturnType() != Type::getInt32Ty(F.getContext())) return; if (reflect.getNumOperands() != 4) return; if (reflect.getIntrinsic() != Intrinsic::nvvm_reflect) return; if (reflect.getOperand(0) != Intrinsic::nvvm_reflect || reflect.getOperand(1) != 2 || reflect.getOperand(2) != 0 || reflect.getOperand(3) != 0) return; if (reflect.getFormat() != ""__nvvm_reflect"") return; if (reflect.getTargetSpecificValue() != ""nvvm-reflect-ftz"") return; }" LLVM,NVPTX,502,"void runOnModule(Module &M) override { StringRef ModuleName = M.getName(); StringRef TargetTriple = M.getTargetTriple(); if (TargetTriple.startsWith(""nvptx"")) { Function *ReflectFunc = M.getFunction(""_reflect""); if (ReflectFunc && ReflectFunc->getReturnType() == Type::getInt32Ty(M.getContext()) && ReflectFunc->getNumOperands() == 1 && ReflectFunc->getOperand(0)->getType() == Type::getInt32Ty(M.getContext())) { Value *Arg = ReflectFunc->getOperand(0); if (isa(Arg)) { int64_t Value = cast(Arg)->getSExtValue(); if (Value >= 0 && Value <= 3) { M.setTargetTriple(Twine(""nvptx-"") + Twine(Value)); } } } } }" LLVM,NVPTX,503,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } LLVM,NVPTX,504,"void run(Function &F, DominatorTree &DT) { const Target &NVPTXTarget = F.getParent()->getTarget(); DT.create(F); for (BasicBlock &BB : F) { Instruction *Term = BB.getTerminator(); if (Term->getOpcode() == Instruction::Br) { DT.add(Term, BB); } } DT.compute(); }" LLVM,NVPTX,505,"class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { return false; } }; char NVPTXPass::ID = 0; static RegisterPass X(""nvptx-pass"", ""NVPTX Pass""); ``` This code defines a new pass called `NVPTXPass` that inherits from the `FunctionPass` class. The `runOnFunction` method is overridden to do the per-function processing of the pass. 
The `runOnFunction` method takes a `Function` object as an argument, which represents the current function being processed by the pass. The method returns a `bool` value indicating whether the pass should continue processing the function. In this example, the `runOnFunction` method does not do anything, but you can add your own code to perform the per-function processing of the pass. You can then use the `NVPTXPass` class in your LLVM code by creating an instance of the class and calling the `runOnFunction` method on it. For example: ``` int main() { NVPTXPass pass; Function *F = new Function(""my_function"", Type::getVoidTy(getGlobalContext())); pass.runOnFunction(*F); return 0; }" LLVM,NVPTX,506,"void runOnFunction(Function &F) { if (!F.hasAttribute(Attribute::NVPTX)) return; Attribute::NVPTX nvptx = F.getAttribute(Attribute::NVPTX); if (!nvptx.hasAttribute(Attribute::_reflect)) return; Attribute::_reflect reflect = nvptx.getAttribute(Attribute::_reflect); if (reflect.getReturnType() != Type::getInt32Ty(F.getContext())) return; if (reflect.getNumOperands() != 4) return; if (reflect.getIntrinsic() != Intrinsic::nvvm_reflect) return; if (reflect.getOperand(0) != Intrinsic::nvvm_reflect || reflect.getOperand(1) != 2 || reflect.getOperand(2) != 0 || reflect.getOperand(3) != 0) return; if (reflect.getFormat() != ""__nvvm_reflect"") return; if (reflect.getTargetSpecificValue() != ""nvvm-reflect-ftz"") return; }" LLVM,NVPTX,507,"void runOnFunction(Function &F) { if (!F.hasAttribute(""nvptx"")) { return; } Attribute nvptxAttr = F.getAttribute(""nvptx""); if (nvptxAttr.getKind() == Attribute::Reflect) { Function *reflectFunc = nvptxAttr.getReflectFunction(); if (reflectFunc->getReturnType() != Type::getInt32Ty(F.getContext())) { return; } if (reflectFunc->getNumOperands() != 2) { return; } if (reflectFunc->getFormat() != ""__nvvm_reflect"") { return; } if (reflectFunc->getTargetSpecificValue() != 10) { return; } reflectFunc->run(); } }" LLVM,NVPTX,508,"void runOnModule(Module &M) override { const Triple &TT = M.getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return; } const DataLayout &DL = M.getDataLayout(); NVPTXTargetMachine *TM = new NVPTXTargetMachine(DL, TT); M.setTargetMachine(TM); NVPTXBackend *BE = new NVPTXBackend(M, TM); M.setBackend(BE); BE->runOnModule(M); delete TM; delete BE; }" LLVM,NVPTX,509,"void runOnFunction(Function &F) { if (!F.hasAttribute(Attribute::NVPTX)) return; Attribute::NVPTX nvptx = F.getAttribute(Attribute::NVPTX); if (!nvptx.hasAttribute(Attribute::_reflect)) return; Attribute::_reflect reflect = nvptx.getAttribute(Attribute::_reflect); if (reflect.getReturnType() != Type::getInt32Ty(F.getContext())) return; if (reflect.getNumOperands() != 4) return; if (reflect.getIntrinsic() != Intrinsic::nvvm_reflect) return; if (reflect.getOperand(0) != Intrinsic::nvvm_reflect || reflect.getOperand(1) != 2 || reflect.getOperand(2) != 0 || reflect.getOperand(3) != 0) return; if (reflect.getFormat() != ""__nvvm_reflect"") return; if (reflect.getTargetSpecificValue() != ""nvvm-reflect-ftz"") return; }" LLVM,NVPTX,510,"void runOnModule(Module &M) override { const Triple &TT = M.getTargetTriple(); if (TT.getArch() != Triple::nvptx) { return; } const DataLayout &DL = M.getDataLayout(); NVPTXTargetMachine *TM = new NVPTXTargetMachine(DL, TT); M.setTargetMachine(TM); NVPTXBackend *BE = new NVPTXBackend(M, TM); M.setBackend(BE); BE->runOnModule(M); delete TM; delete BE; }" LLVM,NVPTX,511,void getAnalysisUsage(AnalysisUsage &AU) { AU.addTargetSpecificAnalysis(); } 
LLVM,NVPTX,512,"void runOnModule(Module &M) override { const char *targetName = ""NVPTX""; const char *targetVersion = ""1.0""; const char *targetArch = ""5""; const char *targetOS = ""0""; const char *targetABI = ""0""; const char *intrinsicName = Intrinsic::getName(M); if (intrinsicName == ""nvvm_reflect"") { const char *reflectMetadata = Intrinsic::nvvm_reflect(M); if (reflectMetadata != nullptr) { const char *reflectTargetName = ""NVPTX""; const char *reflectTargetVersion = ""1.0""; const char *reflectTargetArch = ""5""; const char *reflectTargetOS = ""0""; const char *reflectTargetABI = ""0""; if (reflectTargetName == targetName && reflectTargetVersion == targetVersion && reflectTargetArch == targetArch && reflectTargetOS == targetOS && reflectTargetABI == targetABI) { } } } }" LLVM,NVPTX,513,"const char *getPassName(const Pass *P) { if (P->getPassID() == NVPTX_CONVERT_VECTOR_TYPES_TO_ELEMENT_TYPES) { return ""Convert LLVM vector types to their element types""; } return P->getPassName(); }" LLVM,NVPTX,514,"void runOnMachineFunction(MachineFunction &MF) { const TargetSpecificValue *NVPTX = MF.getTargetSpecificValue(NVPTX, 1, 0); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); }" LLVM,RISCV,0,"bool fixupNeedsRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) { if (Inst.getOpcode() == RISCV::JAL || Inst.getOpcode() == RISCV::JALR) { if (Inst.getOperand(0).isSymbol()) { if (Inst.getOperand(0).getSymbol().isFunction()) { if (Inst.getOperand(0).getSymbol().isResolved()) { return false; } } } } return true; }" LLVM,RISCV,1, LLVM,RISCV,2,unsigned getNumFixupKinds() { return RISCV::NumTargetFixupKinds; } LLVM,RISCV,3,"void applyFixup(uint8_t *data, uint64_t offset, uint64_t value, uint64_t fixupKind) { switch (fixupKind) { case RISCV_RISCV: data[offset] = value; break; case RISCV_RISCV_7: data[offset] = value; break; case RISCV_RISCV_8: data[offset] = value; break; default: break; } }" LLVM,RISCV,4,"namespace { class RISCVObjectWriter : public MCObjectWriter { public: RISCVObjectWriter(MCContext &Ctx, raw_ostream &OS) : MCObjectWriter(Ctx, OS) {} void writeObject(const MCAsmLayout &Layout) override { writeELFHeader(Layout); writeProgramHeaders(Layout); writeSectionHeaders(Layout); writeSectionData(Layout); writeRelocations(Layout); writeSymbolTable(Layout); writeStringTable(Layout); }" LLVM,RISCV,5,"bool fixupNeedsRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) { if (Inst.getOpcode() != RISCV::Instruction::RISCV_INST) return false; if (Inst.getOperand(0).isResolved()) return false; if (Inst.getOperand(0).isRelaxationHint()) return true; if (Inst.getOperand(0).isRelaxationOperand()) return true; return false; }" LLVM,RISCV,6, LLVM,RISCV,7,bool mayNeedRelaxation(const Instruction *I) { if (I->getOpcode() == RISCV::Opcode::RISCV_INSTRUCTION) { if (I->hasRelaxedVersion()) { if (I->getRelaxedVersion()->isAvailable()) { return true; } } } return false; } LLVM,RISCV,8,"void RISCVAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &RelaxedInst) { const MCInstrDesc &InstDesc = Inst.getDesc(); unsigned Opcode = InstDesc.getOpcode(); if (Opcode == RISCV::Instruction::Opcode) { const MCOperand *Operands = Inst.getOperands(); for (unsigned i = 0; i < InstDesc.getNumOperands(); ++i) { const MCOperand &Operand = Operands[i]; if (Operand.isReg() && Operand.getReg() == RISCV::GPR) { RelaxedInst = Inst; RelaxedInst.setOpcode(RISCV::Instruction::Opcode); RelaxedInst.setOperand(i, Operand.getReg()); return; } } } RelaxedInst = Inst; }" LLVM,RISCV,9,"void writeNopData(uint8_t* data, 
uint32_t count) { uint32_t i; for (i = 0; i < count; i += 4) { *data++ = 0x13; } }" LLVM,RISCV,10,"void applyFixup(uint8_t *data, uint64_t offset, uint64_t value, uint64_t fixupKind) { switch (fixupKind) { case RISCV_RISCV: data[offset] = value; break; case RISCV_RISCV_8: data[offset] = value & 0xff; break; case RISCV_RISCV_8_8: data[offset] = (value >> 8) & 0xff; break; default: break; } }" LLVM,RISCV,11, LLVM,RISCV,12, LLVM,RISCV,13, const MachineRelocation &MR) { if (MR.getSymbol()->getType() == RISCV::FeatureRelax) { if (MR.getInstruction()->getOpcode() == RISCV::Instruction::Relax) { if (MR.getOperand(0)->getType() == RISCV::Register) { return true; } } } return false; } LLVM,RISCV,14, LLVM,RISCV,15,"void applyFixup(uint8_t *data, uint64_t offset, uint64_t value, uint64_t fixupKind) { switch (fixupKind) { case RISCV_RISCV: data[offset] = value; break; case RISCV_RISCV_8: data[offset] = value & 0xff; break; case RISCV_RISCV_8_8: data[offset] = (value >> 8) & 0xff; break; default: break; } }" LLVM,RISCV,16,"bool fixupNeedsRelaxationAdvanced(const MCExpr *Expr, unsigned FixupKind) { switch (FixupKind) { case RISCV::fixup_riscv_rvc_branch: return Expr->getKind() == MCExpr::SymbolRef && cast(Expr->getSymbol())->isRVCBranch(); case RISCV::fixup_riscv_rvc_jump: return Expr->getKind() == MCExpr::SymbolRef && cast(Expr->getSymbol())->isRVCJump(); default: return false; } }" LLVM,RISCV,17,unsigned getFixupKind(const MCExpr *Expr) { if (Expr->getKind() == MCExpr::SymbolRef) { const MCSymbol *Sym = Expr->getSymbol(); if (Sym->isDefined()) { return BFD_RELOC_NONE; } else { return BFD_RELOC_32; } } else if (Expr->getKind() == MCExpr::Constant) { return BFD_RELOC_64; } else { return 1u; } } LLVM,RISCV,18, LLVM,RISCV,19,"void relaxInstruction(RISCV::Instruction *inst) { RISCV::Opcode opcode = inst->getOpcode(); switch (opcode) { case RISCV::C_BEQZ: inst->setOpcode(RISCV::BEQ); break; case RISCV::C_BNEZ: inst->setOpcode(RISCV::BNE); break; case RISCV::C_J: inst->setOpcode(RISCV::JAL); break; case RISCV::C_JAL: inst->setOpcode(RISCV::JAL); break; default: llvm_unreachable(""Opcode not expected!""); } }" LLVM,RISCV,20,"bool shouldForceRelocation(const MachineInstr &MI, const MachineReloc &MR) { if (MR.getType() == RISCV::fixup_riscv_got_hi20 || MR.getType() == RISCV::fixup_riscv_tls_got_hi20 || MR.getType() == RISCV::fixup_riscv_tls_gd_hi20) { if (MI.getFeatureRelax() == RISCV::FeatureRelax::Relax) { return true; } } return false; }" LLVM,RISCV,21," unsigned int CodeAlign) { if (Relax.getFeature() == RISCV::FeatureRelax::RISCV && StdExtC.getFeature() == RISCV::FeatureStdExtC::RISCV && CodeAlign == 2 && Relax.getFeature() == RISCV::FeatureRelax::RISCV && StdExtC.getFeature() == RISCV::FeatureStdExtC::RISCV && CodeAlign == 4) { return true; } return false; } ``` This function takes three arguments: `Relax`, `StdExtC`, and `CodeAlign`. It checks if the target specific value is `RISCV,RISCV,RISCV::FeatureRelax,RISCV::FeatureStdExtC,2,4` and returns `true` if it is, otherwise it returns `false`. You can use this function in your LLVM compiler by calling it in the appropriate place, such as in the `emitCodeAlign` function. 
``` void emitCodeAlign(const RISCV::FeatureRelax &Relax, const RISCV::FeatureStdExtC &StdExtC, unsigned int CodeAlign) { if (shouldInsertExtraNopBytesForCodeAlign(Relax, StdExtC, CodeAlign)) { } }" LLVM,RISCV,22,bool shouldInsertFixupForCodeAlign(RISCV::FeatureRelax &FR) { if (FR.getTarget().getTargetMachine().getTargetTriple().getArch() == Triple::riscv) { return true; } return false; } LLVM,RISCV,23,"void writeNopData(uint8_t* data, uint32_t count) { if (count % 4 != 0) { count = (count + 3) & ~3; } for (uint32_t i = 0; i < count; i += 4) { data[i] = 0x13; data[i + 1] = 0x0; data[i + 2] = 0x0; data[i + 3] = 0x0; } }" LLVM,RISCV,24,"void writeNopData(uint8_t* data, uint32_t count) { if (count % 4 != 0) { count = (count + 3) & ~3; } for (uint32_t i = 0; i < count; i += 4) { data[i] = 0x13; data[i + 1] = 0x0; data[i + 2] = 0x0; data[i + 3] = 0x0; } }" LLVM,RISCV,25, LLVM,RISCV,26," unsigned int CodeAlign) { if (Relax.getFeature() == RISCV::FeatureRelax::RISCV && StdExtC.getFeature() == RISCV::FeatureStdExtC::RISCV && CodeAlign == 2 && Relax.getFeature() == RISCV::FeatureRelax::RISCV && StdExtC.getFeature() == RISCV::FeatureStdExtC::RISCV && CodeAlign == 4) { return true; } return false; } ``` This function takes three arguments: `Relax`, `StdExtC`, and `CodeAlign`. It checks if the target specific value is `RISCV,RISCV,RISCV::FeatureRelax,RISCV::FeatureStdExtC,2,4` and returns `true` if it is, otherwise it returns `false`. You can use this function in your LLVM compiler by calling it in the appropriate place, such as in the `emitCodeAlign` function. ``` void emitCodeAlign(const RISCV::FeatureRelax &Relax, const RISCV::FeatureStdExtC &StdExtC, unsigned int CodeAlign) { if (shouldInsertExtraNopBytesForCodeAlign(Relax, StdExtC, CodeAlign)) { } }" LLVM,RISCV,27,bool shouldInsertFixupForCodeAlign(RISCV::FeatureRelax &FR) { if (FR.getTarget().getTargetMachine().getTargetTriple().getArch() == Triple::riscv) { return true; } return false; } LLVM,RISCV,28," llvm::RISCV::RISCV &riscv) { llvm::Value *value = fixup.getValue(); uint64_t offset = fixup.getOffset(); switch (fixup.getKind()) { case llvm::Fixup::Kind::RISCV_HI: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_LO: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_PCREL_HI: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; case llvm::Fixup::Kind::RISCV_PCREL_LO: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; default: llvm_unreachable(""Unsupported fixup kind""); } }" LLVM,RISCV,29,unsigned getNumFixupKinds() { return RISCV_1; } LLVM,RISCV,30,"void RISCVAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &RelaxedInst) { const MCInstrDesc &InstDesc = Inst.getDesc(); unsigned Opcode = InstDesc.getOpcode(); if (Opcode == RISCV::Instruction::Opcode) { const MCOperand *Operands = Inst.getOperands(); for (unsigned i = 0; i < InstDesc.getNumOperands(); ++i) { const MCOperand &Operand = Operands[i]; if (Operand.isReg() && Operand.getReg() == RISCV::GPR) { RelaxedInst = Inst; RelaxedInst.setOpcode(RISCV::Instruction::Opcode); RelaxedInst.setOperand(i, Operand.getReg()); return; } } } RelaxedInst = Inst; }" LLVM,RISCV,31, const MachineRelocation &MR) { if (MR.getSymbol()->getType() == RISCV::FeatureRelax) { if (MR.getInstruction()->getOpcode() == RISCV::Instruction::Relax) { if (MR.getOperand(0)->getType() == RISCV::Register) { return true; } } } return false; } LLVM,RISCV,32, const MCSubtargetInfo &STI) { if (Fixup.getKind() == 
RISCV::fixup_riscv_rvc_branch || Fixup.getKind() == RISCV::fixup_riscv_rvc_jump || Fixup.getKind() == RISCV::fixup_riscv_rvc_cjump) { if (Inst.getOpcode() == RISCV::BRANCH || Inst.getOpcode() == RISCV::JUMP || Inst.getOpcode() == RISCV::CJUMP) { if (Operand.isReg()) { if (Operand.getReg() >= RISCV::R0 && Operand.getReg() <= RISCV::R31) { if (Operand.getReg() == RISCV::R0 || Operand.getReg() == RISCV::R1 || Operand.getReg() == RISCV::R2 || Operand.getReg() == RISCV::R3) { return true; } } } } } return false; } LLVM,RISCV,33,"void relaxInstruction(RISCV::Instruction *inst) { switch (inst->getOpcode()) { case RISCV::C_BEQZ: inst->setOpcode(RISCV::BEQ); inst->setOperand(0, RISCV::X0); inst->setOperand(1, 1); break; case RISCV::C_BNEZ: inst->setOpcode(RISCV::BNE); inst->setOperand(0, RISCV::X0); inst->setOperand(1, 1); break; case RISCV::C_J: inst->setOpcode(RISCV::JAL); inst->setOperand(0, RISCV::X0); inst->setOperand(1, 0); break; case RISCV::C_CJAL: inst->setOpcode(RISCV::CJAL); inst->setOperand(0, RISCV::X1); inst->setOperand(1, 0); break; default: llvm_unreachable(""Unexpected opcode in relaxInstruction""); } }" LLVM,RISCV,34,"bool shouldForceRelocation(const MachineInstr &MI, const Target &Target) { if (MI.getOpcode() == RISCV::fixup_riscv_got_hi20 || MI.getOpcode() == RISCV::fixup_riscv_tls_got_hi20 || MI.getOpcode() == RISCV::fixup_riscv_tls_gd_hi20 || MI.getOpcode() == RISCV::fixup_riscv_captab_pcrel_hi20 || MI.getOpcode() == RISCV::fixup_riscv_tls_ie_captab_pcrel_hi20 || MI.getOpcode() == RISCV::fixup_riscv_tls_gd_captab_pcrel_hi20) { if (Target.getFeatureRelax()) { return true; } } return false; }" LLVM,RISCV,35, const MachineRelocation &MR) { if (MR.getKind() == RISCV::fixup_riscv_got_hi20) { if (MF.getGOTEntry(MR.getSymbol())) { return false; } } if (MR.getKind() == RISCV::fixup_riscv_pcrel_lo12_i || MR.getKind() == RISCV::fixup_riscv_pcrel_lo12_s) { if (MF.getPCRelativeRelocation(MR.getSymbol())) { return false; } } if (MR.getKind() == RISCV::fixup_riscv_got_hi20) { if (MF.getGOTEntry(MR.getSymbol())) { return false; } } LLVM,RISCV,36,"bool shouldForceRelocation(const MachineInstr &MI, const MachineReloc &MR) { if (MR.getType() == RISCV::fixup_riscv_got_hi20 || MR.getType() == RISCV::fixup_riscv_tls_got_hi20 || MR.getType() == RISCV::fixup_riscv_tls_gd_hi20) { if (MI.getFeatureRelax() == RISCV::FeatureRelax::Relax) { return true; } } return false; }" LLVM,RISCV,37,"void applyFixup(uint8_t *data, uint64_t offset, uint64_t value, uint64_t fixupKind) { switch (fixupKind) { case RISCV_RISCV: data[offset] = value; break; case RISCV_RISCV_7: data[offset] = value; break; case RISCV_RISCV_8: data[offset] = value; break; default: break; } }" LLVM,RISCV,38,"namespace { class RISCVObjectWriter : public MCObjectWriter { public: RISCVObjectWriter(MCContext &Ctx, raw_ostream &OS) : MCObjectWriter(Ctx, OS) {} void writeObject(const MCAsmLayout &Layout) override { writeELFHeader(Layout); writeProgramHeaders(Layout); writeSectionHeaders(Layout); writeSectionData(Layout); writeRelocations(Layout); writeSymbolTable(Layout); writeStringTable(Layout); }" LLVM,RISCV,39," llvm::RISCV::RISCV &riscv) { llvm::Value *value = fixup.getValue(); uint64_t offset = fixup.getOffset(); switch (fixup.getKind()) { case llvm::Fixup::Kind::RISCV_HI: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_LO: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_PCREL_HI: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; case 
llvm::Fixup::Kind::RISCV_PCREL_LO: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; default: llvm_unreachable(""Unsupported fixup kind""); } }" LLVM,RISCV,40,"void applyFixup(uint8_t *data, uint32_t offset, uint32_t value, uint32_t fixupKind) { switch (fixupKind) { case RISCV_RISCV: data[offset] = value; break; case RISCV_RISCV_7: data[offset] = value; break; case RISCV_RISCV_8: data[offset] = value; break; default: break; } }" LLVM,RISCV,41, LLVM,RISCV,42,unsigned int getSize(const MCInst *MI) { if (MI->getOpcode() == RISCV) { if (MI->getOpcode() == RISCV::fixup_riscv_rvc_jump || MI->getOpcode() == RISCV::fixup_riscv_rvc_branch) { return 2; } } return 0; } LLVM,RISCV,43,unsigned getFixupKind(const MCExpr *Expr) { if (Expr->getKind() == MCExpr::SymbolRef) { const MCSymbol *Symbol = Expr->getSymbol(); if (Symbol->isDefined()) { return Symbol->getFixupKind(); } else { return RISCV::FixupKind::None; } } else if (Expr->getKind() == MCExpr::Binary) { const MCExpr *Left = Expr->getLeft(); const MCExpr *Right = Expr->getRight(); return getFixupKind(Left) | getFixupKind(Right); } else { return RISCV::FixupKind::None; } } LLVM,RISCV,44, LLVM,RISCV,45, LLVM,RISCV,46, const MCSubtargetInfo &STI) { if (Fixup.getKind() == RISCV::fixup_riscv_rvc_branch || Fixup.getKind() == RISCV::fixup_riscv_rvc_jump) { if (Inst.getOpcode() == RISCV::RISCV_OPCODE_RVC_BRANCH || Inst.getOpcode() == RISCV::RISCV_OPCODE_RVC_JUMP) { if (Fixup.getOffset() == 254 || Fixup.getOffset() == 256) { return true; } } } return false; } LLVM,RISCV,47,"void writeNopData(uint8_t *data, uint64_t count) { uint64_t i = 0; while (i < count) { data[i] = 0x13; i++; } }" LLVM,RISCV,48," llvm::RISCV::RISCV &riscv) { llvm::Value *value = fixup.getValue(); uint64_t offset = fixup.getOffset(); switch (fixup.getKind()) { case llvm::Fixup::Kind::RISCV_HI: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_LO: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_PCREL_HI: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; case llvm::Fixup::Kind::RISCV_PCREL_LO: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; default: llvm_unreachable(""Unsupported fixup kind""); } }" LLVM,RISCV,49," unsigned int CodeAlign) { if (Relax.getFeature() == RISCV::FeatureRelax::RISCV && StdExtC.getFeature() == RISCV::FeatureStdExtC::RISCV && CodeAlign == 2 && Relax.getFeature() == RISCV::FeatureRelax::RISCV && StdExtC.getFeature() == RISCV::FeatureStdExtC::RISCV && CodeAlign == 4) { return true; } return false; } ``` This function takes three arguments: `Relax`, `StdExtC`, and `CodeAlign`. It checks if the target specific value is `RISCV,RISCV,RISCV::FeatureRelax,RISCV::FeatureStdExtC,2,4` and returns `true` if it is, otherwise it returns `false`. You can use this function in your LLVM compiler by calling it in the appropriate place, such as in the `emitCodeAlign` function. 
``` void emitCodeAlign(const RISCV::FeatureRelax &Relax, const RISCV::FeatureStdExtC &StdExtC, unsigned int CodeAlign) { if (shouldInsertExtraNopBytesForCodeAlign(Relax, StdExtC, CodeAlign)) { } }" LLVM,RISCV,50,bool shouldInsertFixupForCodeAlign(RISCV::FeatureRelax &FR) { if (FR.getTarget().getTargetMachine().getTargetTriple().getArch() == Triple::riscv) { return true; } return false; } LLVM,RISCV,51, LLVM,RISCV,52, LLVM,RISCV,53," llvm::RISCV::RISCV &riscv) { llvm::Value *value = fixup.getValue(); uint64_t offset = fixup.getOffset(); switch (fixup.getKind()) { case llvm::Fixup::Kind::RISCV_HI: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_LO: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_PCREL_HI: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; case llvm::Fixup::Kind::RISCV_PCREL_LO: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; default: llvm_unreachable(""Unsupported fixup kind""); } }" LLVM,RISCV,54," llvm::RISCV::RISCV &riscv) { llvm::Value *value = fixup.getValue(); uint64_t offset = fixup.getOffset(); switch (fixup.getKind()) { case llvm::Fixup::Kind::RISCV_HI: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_LO: data.setUInt16(offset, value->getUInt16()); break; case llvm::Fixup::Kind::RISCV_PCREL_HI: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; case llvm::Fixup::Kind::RISCV_PCREL_LO: data.setUInt16(offset, value->getUInt16() - riscv.getPC()); break; default: llvm_unreachable(""Unsupported fixup kind""); } }" LLVM,RISCV,55,"void applyFixup(uint8_t *data, uint64_t offset, uint64_t value, uint64_t fixupKind) { switch (fixupKind) { case RISCV_RISCV: data[offset] = value; break; case RISCV_RISCV_8: data[offset] = value & 0xff; break; case RISCV_RISCV_8_8: data[offset] = (value >> 8) & 0xff; break; default: break; } }" LLVM,RISCV,56,"bool fixupNeedsRelaxationAdvanced(const MCExpr *Expr, unsigned FixupKind) { switch (FixupKind) { case RISCV::fixup_riscv_rvc_branch: return Expr->getKind() == MCExpr::SymbolRef && cast(Expr->getSymbol())->isRVCBranch(); case RISCV::fixup_riscv_rvc_jump: return Expr->getKind() == MCExpr::SymbolRef && cast(Expr->getSymbol())->isRVCJump(); default: return false; } }" LLVM,RISCV,57, LLVM,RISCV,58,"void relaxInstruction(RISCV::Instruction *inst) { RISCV::Opcode opcode = inst->getOpcode(); switch (opcode) { case RISCV::C_BEQZ: inst->setOpcode(RISCV::BEQ); break; case RISCV::C_BNEZ: inst->setOpcode(RISCV::BNE); break; case RISCV::C_J: inst->setOpcode(RISCV::JAL); break; case RISCV::C_JAL: inst->setOpcode(RISCV::JAL); break; default: llvm_unreachable(""Opcode not expected!""); } }" LLVM,RISCV,59,"bool shouldForceRelocation(const MachineInstr &MI, const MachineReloc &MR) { if (MR.getKind() == RISCV::fixup_riscv_pcrel_lo12_i || MR.getKind() == RISCV::fixup_riscv_pcrel_lo12_s) { if (MR.getKind() == RISCV::fixup_riscv_pcrel_hi20) { if (MR.getAddend() != 0) { return true; } } } if (MR.getKind() == RISCV::fixup_riscv_pcrel_hi20 && MR.getAddend() != 0) { return true; } if (MR.getKind() == RISCV::fixup_riscv_pcrel_lo12_i && MR.getAddend() != 0) { return true; }" LLVM,RISCV,60,"void writeNopData(uint8_t *data, uint64_t count) { uint64_t i = 0; while (i < count) { data[i] = 0x13; i++; } }" LLVM,RISCV,61,"void addExpr(MCExpr *expr, int64_t value) { if (expr->getKind() != MCExpr::RISCV) { return; } int64_t riscvValue = expr->getRISCVValue(); riscvValue += value; expr->setRISCVValue(riscvValue); }" LLVM,RISCV,62, 
LLVM,RISCV,63, LLVM,RISCV,64,bool isMem(const MachineOperand &op) { if (op.isMemory()) { if (op.getMemory().getAddressSpace() == RISCV_ADDRESS_SPACE) { return true; } } return false; } LLVM,RISCV,65,bool isReg(const MachineOperand &MO) { if (MO.isReg()) { if (MO.getReg() >= RISCV::X0 && MO.getReg() <= RISCV::X31) { return true; } } return false; } LLVM,RISCV,66,"bool isToken(const MachineOperand &MO) { if (MO.isToken()) { return true; } if (MO.isReg() && MO.getReg() == RISCV::Token) { return true; } if (MO.isMem() && MO.getMem().getBase() == RISCV::Token) { return true; } return false; } ``` This function takes a `MachineOperand` object as an argument and returns a boolean value indicating whether the operand is a token operand. It checks if the operand is a token, a register that is a token register, or a memory operand that is a token memory operand. If any of these conditions are true, the function returns `true`, otherwise it returns `false`. You can use this function in your RISCV backend of the LLVM Compiler to determine if an operand is a token operand. For example, you could use it in the `getOperandInfo` function to determine the type of an operand: ``` OperandInfo getOperandInfo(const MachineOperand &MO) { if (isToken(MO)) { return TokenOperand; } }" LLVM,RISCV,67,"void MatchAndEmitInstruction(MCInst &Inst, MCStreamer &Streamer) { if (!Inst.isValid()) { Streamer.EmitError(""invalid instruction""); return; } MCInst::OperandList &Operands = Inst.getOperands(); if (Operands.size() != 1) { Streamer.EmitError(""too few operands for instruction""); return; } MCInst::Operand &Operand = Operands[0]; if (!Operand.isReg()) { Streamer.EmitError(""invalid operand for instruction""); return; } unsigned RegNum = Operand.getReg(); if (RegNum >= 32) { Streamer.EmitError(""invalid register number for instruction""); return; } Streamer.EmitInstruction(Inst); }" LLVM,RISCV,68,"void ParseDirective(const char *directive) { if (strncmp(directive, "".riscv"", 6) == 0) { const char *directive_name = directive + 6; const char *directive_value = strchr(directive_name, '='); if (directive_value == NULL) { return; } directive_value++; if (strncmp(directive_name, ""riscv"", 5) == 0) { const char *riscv_directive_name = directive_name + 5; const char *riscv_directive_value = strchr(riscv_directive_name, '='); if (riscv_directive_value == NULL) { return; } riscv_directive_value++; if (strncmp(riscv_directive_name, ""riscv"", 5) == 0) { const char *riscv_directive_value = strchr(riscv_directive_name, '='); if (riscv_directive_value == NULL) { return; }" LLVM,RISCV,69,"static void ParseInstruction(StringRef Instruction) { uint64_t TargetSpecificValue = TheRISCVTarget.getTargetSpecificValue(Instruction); if (TargetSpecificValue == 0) { errs() << ""unknown instruction: "" << Instruction << ""\n""; return; } if (TargetSpecificValue == 1) { errs() << ""unexpected token in argument list: "" << Instruction << ""\n""; return; } if (TargetSpecificValue == 2) { errs() << ""unexpected token in argument list: "" << Instruction << ""\n""; return; } outs() << ""Target specific value for instruction "" << Instruction << "": "" << TargetSpecificValue << ""\n""; } int main(int argc, char **argv) { InitializeLLVM(); TheRISCVTarget = GetTargetRegistry().getTarget(""riscv""); ParseInstruction(""addi x1, x2, 1""); return 0; }" LLVM,RISCV,70,"void print(Value *value, raw_ostream &stream) { stream << ""Not implemented for RISCV""; }" LLVM,RISCV,71,"void addExpr(MCExpr *expr) { assert(expr != nullptr && ""Expr shouldn't be null!""); 
expr->add(RISCV, RISCV, RISCV); }" LLVM,RISCV,72, LLVM,RISCV,73,"static void getLoc(Module *M, Function *F, BasicBlock *BB, Instruction *I) { DebugLoc DL = I->getDebugLoc(); if (!DL) { return; } StringRef File = DL.getFilename(); unsigned Line = DL.getLineNumber(); std::string Loc = File + "":"" + std::to_string(Line); errs() << ""Source location: "" << Loc << ""\n""; }" LLVM,RISCV,74, LLVM,RISCV,75, LLVM,RISCV,76, LLVM,RISCV,77,bool isMem(const MachineOperand &op) { if (op.isMemory()) { if (op.getMemory().getAddressSpace() == RISCV_ADDRESS_SPACE) { return true; } } return false; } LLVM,RISCV,78,bool isReg(const MachineOperand &MO) { if (MO.isReg()) { if (MO.getReg() >= RISCV::X0 && MO.getReg() <= RISCV::X31) { return true; } } return false; } LLVM,RISCV,79,"bool isToken(const MachineOperand &MO) { if (MO.isToken()) { return true; } if (MO.isReg() && MO.getReg() == RISCV::Token) { return true; } if (MO.isMem() && MO.getMem().getBase() == RISCV::Token) { return true; } return false; } ``` This function takes a `MachineOperand` object as an argument and returns a boolean value indicating whether the operand is a token operand. It checks if the operand is a token, a register that is a token register, or a memory operand that is a token memory operand. If any of these conditions are true, the function returns `true`, otherwise it returns `false`. You can use this function in your RISCV backend of the LLVM Compiler to determine if an operand is a token operand. For example, you could use it in the `getOperandInfo` function to determine the type of an operand: ``` OperandInfo getOperandInfo(const MachineOperand &MO) { if (isToken(MO)) { return TokenOperand; } }" LLVM,RISCV,80,"void ParseDirective(const char *directive) { if (strcmp(directive, "".option"") == 0) { } else if (strcmp(directive, "".attribute"") == 0) { } else if (strcmp(directive, "".insn"") == 0) { } else { } }" LLVM,RISCV,81," RISCV::FeatureRelax &Features) { StringRef Opcode, Operands; Instruction.split(Opcode, Operands); if (!RISCV::IsValidOpcode(Opcode)) { return make_error(""unexpected token"", Instruction); } SmallVector OperandList; Operands.split(OperandList, "",""); for (const auto &Operand : OperandList) { if (!RISCV::IsValidOperand(Operand)) { return make_error(""unexpected token"", Instruction); } } if (!RISCV::IsValidInstruction(Opcode, OperandList)) { return make_error(""unexpected token"", Instruction); } return Instruction; }" LLVM,RISCV,82,"void print(const char *format, ...) 
{ va_list args; va_start(args, format); fprintf(stderr, format, args); va_end(args); }" LLVM,RISCV,83,"bool tryParseRegister(const char *str, unsigned ®) { if (strncmp(str, ""RISCV"", 5) != 0) { return false; } unsigned regNum = strtoul(str + 5, NULL, 10); if (regNum >= 32) { return false; } if (regNum >= 16 && regNum <= 23) { return false; } reg = regNum; return true; }" LLVM,RISCV,84,"bool validateTargetOperandClass(const Target &T, const Operand &Op) { if (Op.isReg()) { if (Op.getRegClass() == RISCV::FPR64RegClassID) { if (Op.getRegClass() == RISCV::FPR64CRegClassID) { if (Op.getRegClass() == RISCV::VRRegClassID) { if (Op.getRegClass() == 0) { return true; } } } } } return false; }" LLVM,RISCV,85,"void ParseDirective(const char *directive) { if (strcmp(directive, "".option"") == 0) { } else if (strcmp(directive, "".attribute"") == 0) { } else { } }" LLVM,RISCV,86,"bool validateTargetOperandClass(const Target &T, const Operand &Op) { if (Op.isReg()) { if (Op.getRegClass() == RISCV::FPR64RegClassID) { if (Op.getRegClass() == RISCV::FPR64CRegClassID) { return false; } } } return true; }" LLVM,RISCV,87,"void addExpr(MCExpr *expr) { assert(expr != nullptr && ""Expr shouldn't be null!""); if (expr->getKind() == MCExpr::RISCV) { expr->addOperand(new MCExpr(MCExpr::RISCV, 0, RISCV)); } else { expr->addOperand(new MCExpr(MCExpr::RISCV, 0, RISCV)); } }" LLVM,RISCV,88, LLVM,RISCV,89,"void print(RegisterOperand *operand, Stream *stream) { stream->write(operand->getName()); stream->write("": ""); stream->write(std::to_string(operand->getNumber())); stream->write("" (""); stream->write(operand->getType()->getName()); stream->write("")""); }" LLVM,RISCV,90,"void processInstruction(Instruction *I, DebugInfo *DI) { switch (I->getOpcode()) { case RISCV::PseudoLI: break; case RISCV::PseudoLLA: break; default: break; } }" LLVM,RISCV,91,"void print(Value *value, raw_ostream &os) { if (value->getType()->isPointerTy()) { os << ""Pointer to ""; print(value->getPointerOperand(), os); } else if (value->getType()->isIntegerTy()) { os << ""Integer ""; print(value->getIntegerOperand(), os); } else if (value->getType()->isFloatTy()) { os << ""Float ""; print(value->getFloatOperand(), os); } else if (value->getType()->isDoubleTy()) { os << ""Double ""; print(value->getDoubleOperand(), os); } else if (value->getType()->isVectorTy()) { os << ""Vector ""; print(value->getVectorOperand(), os); } else if (value->getType()->isStructTy()) { os << ""Struct ""; print(value->getStructOperand(), os); } else if (value->getType()->isArrayTy()) { os << ""Array ""; print(value->getArrayOperand(), os); } else if (value->getType()->isFunctionTy()) { os << ""Function ""; print(value->getFunctionOperand(), os); } else if (value->getType()->isLabelTy()) { os << ""Label ""; print(value->getLabelOperand(), os); } else if (value->getType()->isMetadataTy()) { os << ""Metadata ""; print(value->getMetadataOperand(), os); } else if (value->getType()->isTokenTy()) { os << ""Token ""; print(value->getTokenOperand(), os); } else { os << ""Unknown type ""; print(value->getType(), os); } }" LLVM,RISCV,92,"bool tryParseRegister(const char *str, unsigned ®) { if (strncmp(str, ""RISCV"", 5) != 0) { return false; } unsigned regNum = strtoul(str + 5, NULL, 10); if (regNum >= 32) { return false; } if (regNum >= 16 && regNum <= 23) { return false; } reg = regNum; return true; }" LLVM,RISCV,93,"bool validateTargetOperandClass(const Target &T, const Operand &Op) { if (Op.isReg()) { if (Op.getRegClass() == RISCV::FPR64RegClassID) { if (Op.getRegClass() == 
RISCV::FPR64CRegClassID) { return false; } } } return true; }" LLVM,RISCV,94, LLVM,RISCV,95, LLVM,RISCV,96,"void MatchAndEmitInstruction(MCInst &Inst, MCStreamer &Streamer) { if (!Inst.isRecognized()) { Streamer.EmitError(""unrecognized instruction mnemonic""); return; } if (Inst.getOption() != 0 && !Inst.isOptionEnabled()) { Streamer.EmitError(""instruction use requires an option to be enabled""); return; } if (Inst.getNumOperands() < Inst.getMinNumOperands()) { Streamer.EmitError(""too few operands for instruction""); return; } for (unsigned i = 0; i < Inst.getNumOperands(); i++) { if (!Inst.isOperandValid(i)) { Streamer.EmitError(""invalid operand for instruction""); return; } } Streamer.EmitInstruction(Inst); }" LLVM,RISCV,97, LLVM,RISCV,98, LLVM,RISCV,99,"void ParseInstruction(StringRef Instruction) { const TargetMachine *TM = TargetRegistry::lookupTarget(""riscv"", ""riscv""); if (!TM) { errs() << ""Error: unable to find RISCV target machine\n""; return; } InstructionParser Parser(TM); Parser.parse(Instruction); if (Parser.hasError()) { errs() << ""Error: "" << Parser.getErrorMessage() << ""\n""; return; } outs() << ""Parsed instruction: "" << Parser.getParsedInstruction() << ""\n""; }" LLVM,RISCV,100,"void processInstruction(Instruction *I, DebugInfo *DI) { unsigned Opcode = I->getOpcode(); if (Opcode == RISCV::PseudoLI) { Value *Op1 = I->getOperand(0); Value *Op2 = I->getOperand(1); if (Op1->getType()->isRegister() && Op2->getType()->isRegister()) { unsigned Reg1 = Op1->getReg(); unsigned Reg2 = Op2->getReg(); if (Reg1 == Reg2) { DI->addAnchor(I, Reg1); } } } }" LLVM,RISCV,101,"void print(Value *V, raw_ostream &OS) { if (auto *I = dyn_cast(V)) { OS << ""Instruction: ""; I->print(OS); } else if (auto *C = dyn_cast(V)) { OS << ""Constant: ""; C->print(OS); } else if (auto *F = dyn_cast(V)) { OS << ""Function: ""; F->print(OS); } else if (auto *M = dyn_cast(V)) { OS << ""Module: ""; M->print(OS); } else { OS << ""Unknown value: ""; V->print(OS); } }" LLVM,RISCV,102,"bool validateTargetOperandClass(const Target &T, const Operand &Op) { if (Op.isReg()) { if (Op.getRegClass() == RISCV::FPR64RegClassID) { if (Op.getRegClass() == RISCV::FPR64CRegClassID) { return false; } } } return true; }" LLVM,RISCV,103, LLVM,RISCV,104,"void processInstruction(Instruction *I, DebugInfo *DI) { unsigned Opcode = I->getOpcode(); switch (Opcode) { case RISCV::PseudoLI: break; case RISCV::ADDI: collectDebugInfo(I, DI); break; case RISCV::X0: break; case RISCV::PseudoLLA: break; case RISCV::PseudoLA: break; default: break; } }" LLVM,RISCV,105,"void print(Value *value, raw_ostream &out) { if (value->getType()->isPointerTy()) { out << ""Pointer to ""; print(value->getType()->getPointerElementType(), out); } else if (value->getType()->isIntegerTy()) { out << ""Integer""; } else if (value->getType()->isFloatingPointTy()) { out << ""Floating point""; } else if (value->getType()->isVectorTy()) { out << ""Vector""; } else if (value->getType()->isArrayTy()) { out << ""Array""; } else if (value->getType()->isStructTy()) { out << ""Struct""; } else if (value->getType()->isFunctionTy()) { out << ""Function""; } else { out << ""Unknown type""; } if (value->hasName()) { out << "" '"" << value->getName() << ""'""; }" LLVM,RISCV,106,"bool validateTargetOperandClass(const Operand &Op, const Target &Target) { if (Op.isReg()) { if (Op.getRegClass() == RISCV::FPR32RegClassID) { if (Op.getRegSize() == 32) { if (Op.getRegClass() == RISCV::FPR32CRegClassID) { return true; } } } } return false; }" LLVM,RISCV,107,"void 
addExpr(MCExpr *expr) { assert(expr != nullptr && ""Expr shouldn't be null!""); if (expr->getKind() == MCExpr::RISCV) { expr->addOperand(new MCExpr(MCExpr::RISCV, 0, RISCV)); } else { expr->addOperand(new MCExpr(MCExpr::RISCV, 0, RISCV)); } }" LLVM,RISCV,108, LLVM,RISCV,109, LLVM,RISCV,110,bool isReg(const MachineOperand &MO) { if (MO.isReg()) { if (MO.getReg() >= RISCV::X0 && MO.getReg() <= RISCV::X31) { return true; } } return false; } LLVM,RISCV,111,"bool isToken(const MachineOperand &MO) { if (MO.isToken()) { return true; } if (MO.isReg() && MO.getReg() == RISCV::Token) { return true; } if (MO.isMem() && MO.getMem().getBase() == RISCV::Token) { return true; } return false; } ``` This function takes a `MachineOperand` object as an argument and returns a boolean value indicating whether the operand is a token operand. It checks if the operand is a token, a register that is a token register, or a memory operand that is a token memory operand. If any of these conditions are true, the function returns `true`, otherwise it returns `false`. You can use this function in your RISCV backend of the LLVM Compiler to determine if an operand is a token operand. For example, you could use it in the `getOperandInfo` function to determine the type of an operand: ``` OperandInfo getOperandInfo(const MachineOperand &MO) { if (isToken(MO)) { return TokenOperand; } }" LLVM,RISCV,112,"void ParseDirective(const char *directive) { if (strcmp(directive, "".option"") == 0) { } else { } }" LLVM,RISCV,113, LLVM,RISCV,114,"void print(Value *value, raw_ostream &os) { if (value->getType()->isPointerTy()) { os << ""Pointer to ""; print(value->getPointerOperand(), os); } else if (value->getType()->isIntegerTy()) { os << ""Integer ""; print(value->getIntegerOperand(), os); } else if (value->getType()->isFloatTy()) { os << ""Float ""; print(value->getFloatOperand(), os); } else if (value->getType()->isDoubleTy()) { os << ""Double ""; print(value->getDoubleOperand(), os); } else if (value->getType()->isVectorTy()) { os << ""Vector ""; print(value->getVectorOperand(), os); } else if (value->getType()->isStructTy()) { os << ""Struct ""; print(value->getStructOperand(), os); } else if (value->getType()->isArrayTy()) { os << ""Array ""; print(value->getArrayOperand(), os); } else if (value->getType()->isFunctionTy()) { os << ""Function ""; print(value->getFunctionOperand(), os); } else if (value->getType()->isLabelTy()) { os << ""Label ""; print(value->getLabelOperand(), os); } else if (value->getType()->isMetadataTy()) { os << ""Metadata ""; print(value->getMetadataOperand(), os); } else if (value->getType()->isTokenTy()) { os << ""Token ""; print(value->getTokenOperand(), os); } else { os << ""Unknown type ""; print(value->getType(), os); } }" LLVM,RISCV,115,"void processInstruction(Instruction *I, DebugInfo *DI) { if (I->getOpcode() == Instruction::PseudoLI) { PseudoLI *PseudoLI = cast(I); collectDebugInfoAnchors(PseudoLI, DI); } else if (I->getOpcode() == Instruction::ADDI) { ADDI *ADDI = cast(I); collectDebugInfoAnchors(ADDI, DI); } else if (I->getOpcode() == Instruction::X0) { X0 *X0 = cast(I); collectDebugInfoAnchors(X0, DI); } else if (I->getOpcode() == Instruction::PseudoLLA) { PseudoLLA *PseudoLLA = cast(I); collectDebugInfoAnchors(PseudoLLA, DI); } }" LLVM,RISCV,116,"bool validateTargetOperandClass(const Operand &Op, const Target &Target) { if (Op.isReg()) { if (Op.getRegClass() == RISCV::FPR32RegClassID) { if (Op.getRegSize() == 32) { if (Op.getRegClass() == RISCV::FPR32CRegClassID) { return true; } } } } return 
false; }" LLVM,RISCV,117," raw_ostream& OS) { HazardState& Hazard = Context.getHazardState(); if (Inst.getOpcode() == RISCV::Instruction::RISCV_Instruction) { const RISCV::Instruction& RISCVInst = static_cast(Inst); if (RISCVInst.isHazardous()) { Hazard.update(RISCVInst.getHazardType()); } } Printer.printInstruction(Inst, OS); }" LLVM,RISCV,118," const TargetAsmInfo *TAI) { const MachineOperand &MO = MI->getOperand(OpNum); unsigned Reg = MO.getReg(); if (MO.isReg()) { TAI->printRegName(Reg); } else { TAI->printMemOperand(MI, OpNum, TRI); } }" LLVM,RISCV,119," SmallVectorImpl &Str) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { const MCRegister &Reg = Op.getReg(); Str.append(Reg.getName()); } else if (Op.isImm()) { const MCImmediate &Imm = Op.getImm(); Str.append(Imm.getImm()); } else if (Op.isMem()) { const MCMemOperand &Mem = Op.getMem(); Str.append(Mem.getBase()); Str.append(Mem.getIndex()); Str.append(Mem.getScale()); } else { Str.append(""unknown""); } }" LLVM,RISCV,120,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,121," raw_ostream& OS) { HazardState& Hazard = Context.getHazardState(); if (Inst.getOpcode() == RISCV::Instruction::RISCV_Instruction) { const RISCV::Instruction& RISCVInst = static_cast(Inst); if (RISCVInst.isHazardous()) { Hazard.update(RISCVInst.getHazardType()); } } Printer.printInstruction(Inst, OS); }" LLVM,RISCV,122, LLVM,RISCV,123," const TargetInlineAsmInfo *TIAI) { const MachineOperand &MO = MI->getOperand(OpNum); const MachineMemOperand *MMO = MO.getMemOperand(); const TargetInlineAsmInfo::AddrMode AddrMode = TIAI->getAddrMode(MMO->getAddrMode()); switch (AddrMode) { case TargetInlineAsmInfo::AddrMode::RISCV: Out << ""RISCV""; break; case TargetInlineAsmInfo::AddrMode::RISCV64: Out << ""RISCV64""; break; case TargetInlineAsmInfo::AddrMode::RISCV128: Out << ""RISCV128""; break; default: llvm_unreachable(""Invalid addressing mode""); } Out << ""(""; TRI->printRegisterName(Out, MMO->getBaseReg()); Out << "")""; if (MMO->getOffset() != 0) { Out << "" + ""; TRI->printImmediate(Out, MMO->getOffset(), MMO->getOffsetSize()); } if (MMO->getSize() != 0) { Out << "" * ""; TRI->printImmediate(Out, MMO->getSize(), MMO->getSizeSize()); } Out << ""(""; TRI->printAddressingModeSuffix(Out, MMO->getAddrMode()); Out << "")""; }" LLVM,RISCV,124, const MCSubtargetInfo *STI) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { const char *RegName = RISCV::getRegisterName(Op.getReg()); outs() << RegName; } else { const MCExpr *Expr = Op.getExpr(); int64_t Imm = Expr->evaluateAsInt(STI); outs() << Imm; } } LLVM,RISCV,125,"void PrintAsmOperand(const MachineOperand &MO, raw_ostream &OS) { if (MO.isReg()) { OS << MO.getReg(); return; } if (MO.isImm()) { OS << MO.getImm(); return; } if (MO.isMem()) { OS << MO.getMem().getBase(); OS << MO.getMem().getIndex(); OS << MO.getMem().getScale(); return; } if (MO.isLabel()) { OS << MO.getLabel().getName(); return; } OS << ""Unknown operand type""; }" LLVM,RISCV,126,"void emitBasicBlockEnd(RISCVTargetMachine *TM, RISCVBasicBlock *BB) { }" LLVM,RISCV,127,"void emitBasicBlockStart(RISCVTargetMachine *TM, RISCVBasicBlock *BB) { TM->emitBasicBlockStart(BB); TM->emitRISCV(RISCV::RISCV_BASIC_BLOCK_START, BB); TM->emitRISCV(RISCV::RISCV_BASIC_BLOCK_START, BB); TM->emitRISCV(RISCV::RISCV_BASIC_BLOCK_START, 
BB); TM->emitRISCV(RISCV::RISCV_BASIC_BLOCK_START, BB); TM->emitRISCV(RISCV::RISCV_BASIC_BLOCK_START, BB); }" LLVM,RISCV,128, LLVM,RISCV,129,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTarget *RISCV = static_cast(TM->getTarget()); const RISCVTargetMachine *RISCVTM = static_cast(RISCV); } LLVM,RISCV,130," const char *target_specific_value) { printf(""%s"", operand); printf("" %s"", asm_string); printf("" %s"", target_specific_value); printf(""\n""); }" LLVM,RISCV,131, LLVM,RISCV,132,void emitEndOfAsmFile(RISCVTargetMachine *TM) { TM->emitTargetSpecificValues(); TM->emitRISCVSpecificValues(); TM->emitRISCVInstructions(); TM->emitRISCVData(); } LLVM,RISCV,133,"void emitFunctionEntryLabel(LLVMContext& context, Function* function) { const Target& target = context.getTarget(); const RISCVTargetInfo& riscvTargetInfo = target.getRISCVTargetInfo(); const char* label = riscvTargetInfo.getFunctionEntryLabel(); if (label) { context.emitLabel(label); } }" LLVM,RISCV,134, LLVM,RISCV,135,"void emitStartOfAsmFile(RISCVTargetMachine *TM) { TM->emitTargetSpecificValue(""RISCV"", ""RISCV""); }" LLVM,RISCV,136,"const char *getPassName(const Pass *P) { if (P->getPassID() == RISCV_Assembly_Printer) { return ""RISCV Assembly Printer""; } else { return P->getPassName(); } }" LLVM,RISCV,137, LLVM,RISCV,138," const TargetAsmInfo *TAI) { const MachineOperand &MO = MI->getOperand(OpNum); unsigned Reg = MO.getReg(); if (MO.isReg()) { TAI->printRegName(Reg); } else { TAI->printMemoryOperand(MO, Reg); } }" LLVM,RISCV,139,"void PrintAsmOperand(const MachineOperand &MO, raw_ostream &OS) { if (MO.isReg()) { OS << MO.getReg(); return; } if (MO.isImm()) { OS << MO.getImm(); return; } if (MO.isMem()) { OS << MO.getMem().getBase(); OS << MO.getMem().getIndex(); OS << MO.getMem().getScale(); return; } if (MO.isLabel()) { OS << MO.getLabel().getName(); return; } OS << ""Unknown operand type""; }" LLVM,RISCV,140,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,141,PredicateConstraint getConstraint(const RISCV::Predicate &Pred) { switch (Pred) { case RISCV::Predicate::EQ: return PredicateConstraint::EQ; case RISCV::Predicate::NE: return PredicateConstraint::NE; case RISCV::Predicate::LT: return PredicateConstraint::LT; case RISCV::Predicate::LE: return PredicateConstraint::LE; case RISCV::Predicate::GT: return PredicateConstraint::GT; case RISCV::Predicate::GE: return PredicateConstraint::GE; case RISCV::Predicate::SLT: return PredicateConstraint::SLT; case RISCV::Predicate::SLE: return PredicateConstraint::SLE; case RISCV::Predicate::SGT: return PredicateConstraint::SGT; case RISCV::Predicate::SGE: return PredicateConstraint::SGE; default: return PredicateConstraint::Unknown; } } LLVM,RISCV,142,PredicateConstraint getConstraint(const RISCV::Predicate &Pred) { switch (Pred) { case RISCV::Predicate::EQ: return PredicateConstraint::EQ; case RISCV::Predicate::NE: return PredicateConstraint::NE; case RISCV::Predicate::LT: return PredicateConstraint::LT; case RISCV::Predicate::LE: return PredicateConstraint::LE; case RISCV::Predicate::GT: return PredicateConstraint::GT; case RISCV::Predicate::GE: return PredicateConstraint::GE; case RISCV::Predicate::SLT: return PredicateConstraint::SLT; case RISCV::Predicate::SLE: return PredicateConstraint::SLE; case 
RISCV::Predicate::SGT: return PredicateConstraint::SGT; case RISCV::Predicate::SGE: return PredicateConstraint::SGE; default: return PredicateConstraint::Unknown; } } LLVM,RISCV,143,DWARFFormat getFormat(const TargetMachine &TM) { if (TM.getTargetTriple().getArch() == Triple::riscv) { return DWARFFormat::DWARF64; } else { return DWARFFormat::DWARF32; } } LLVM,RISCV,144,"const char *getPassName(const Pass *P) { if (P->getPassKind() == RISCV_BRANCH_SELECTOR) { return ""RISCV Branch Selector""; } return P->getPassName(); }" LLVM,RISCV,145,"void lowerFormalArguments(GlobalISel &GIS, VRegs &VR) { unsigned numFormalArgs = VR.size(); for (unsigned i = 0; i < numFormalArgs; i++) { VReg &formalArg = VR[i]; if (formalArg.isRegister()) { unsigned regNum = formalArg.getRegisterNumber(); if (GIS.isCalleeSavedRegister(regNum)) { formalArg.setRegisterNumber(GIS.getCalleeSavedRegister(regNum)); } } } }" LLVM,RISCV,146," const TargetLowering &Target) { Type *ReturnType = F.getReturnType(); BasicBlock *ReturnBB = BasicBlock::Create(Context, ""return"", &F); Instruction *ReturnInst = ReturnBB->getTerminator(); ReturnInst->setOpcode(Instruction::Ret); ReturnInst->setOperand(0, ReturnType); F.addBasicBlock(ReturnBB); F.setReturnType(ReturnType); F.setReturnValue(ReturnInst); } ``` This function creates a new basic block for the return and adds it to the function. It also creates a new instruction to return the value and sets the return type and return value of the function. The `TargetLowering` parameter is used to get the target-specific value for the `PseudoRET` instruction. ``` const TargetLowering *TargetLowering = Target.getTargetLowering(); if (TargetLowering) { const TargetLowering::PseudoRET *PseudoRET = TargetLowering->getPseudoRET(); if (PseudoRET) { F.setReturnType(PseudoRET->getReturnType()); F.setReturnValue(PseudoRET->getReturnValue()); } }" LLVM,RISCV,147,"void lowerCall(CallInst *CI, IRBuilder<> &Builder) { Function *F = CI->getCalledFunction(); SmallVector Args; for (auto &Arg : CI->args()) { Args.push_back(Arg); } Value *RetVal = CI->getReturnValue(); SmallVector MarshalledArgs; for (auto &Arg : Args) { Value *MarshalledArg = Builder.CreateMarshalling(Arg, F->getParamType(Arg->getType())); MarshalledArgs.push_back(MarshalledArg); } Value *MarshalledRetVal = Builder.CreateMarshalling(RetVal, F->getReturnType()); CallInst *NewCI = Builder.CreateCall(F, MarshalledArgs, MarshalledRetVal); CI->replaceAllUsesWith(NewCI); CI->eraseFromParent(); }" LLVM,RISCV,148,"void lowerFormalArguments(GlobalISel &GIS, VRegs &VR) { unsigned numFormalArgs = VR.size(); for (unsigned i = 0; i < numFormalArgs; i++) { VReg &formalArg = VR[i]; if (formalArg.isRegister()) { unsigned regNum = formalArg.getRegisterNumber(); if (GIS.isCalleeSavedRegister(regNum)) { formalArg.setRegisterNumber(GIS.getCalleeSavedRegister(regNum)); } } } }" LLVM,RISCV,149," const TargetLowering &Target) { Type *ReturnType = F.getReturnType(); BasicBlock *ReturnBB = BasicBlock::Create(Context, ""return"", &F); Instruction *ReturnInst = ReturnBB->getTerminator(); ReturnInst->setOpcode(Instruction::Ret); ReturnInst->setOperand(0, ReturnType); F.addBasicBlock(ReturnBB); F.setReturnType(ReturnType); F.setReturnValue(ReturnInst); } ``` This function creates a new basic block for the return and adds it to the function. It also creates a new instruction to return the value and sets the return type and return value of the function. The `TargetLowering` parameter is used to get the target-specific value for the `PseudoRET` instruction. 
``` const TargetLowering *TargetLowering = Target.getTargetLowering(); if (TargetLowering) { const TargetLowering::PseudoRET *PseudoRET = TargetLowering->getPseudoRET(); if (PseudoRET) { F.setReturnType(PseudoRET->getReturnType()); F.setReturnValue(PseudoRET->getReturnValue()); } }" LLVM,RISCV,150, LLVM,RISCV,151,"const char *getPassName(const Pass *P) { if (P->getTargetMachine()->getTargetTriple().isRISCV()) { return ""RISCV""; } else { return ""Unknown""; } }" LLVM,RISCV,152,void runOnMachineFunction(MachineFunction &MF) { const RISCVTargetMachine &TM = MF.getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,153, LLVM,RISCV,154,"void print(Value *value, raw_ostream &stream) { Type *type = value->getType(); if (type->isRISCV()) { stream << ""RISCV ""; RISCVType *riscvType = cast(type); stream << riscvType->getName(); stream << "" ("" << riscvType->getSize() << "")""; } else { stream << type->getName(); } stream << "" "" << value->getName(); }" LLVM,RISCV,155, LLVM,RISCV,156, LLVM,RISCV,157,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,158,"static void getInstruction(uint64_t PC, uint64_t Size, uint64_t Instruction) { InitializeRISCVTarget(); RISCVDisassembler Disassembler(TheRISCVTarget); SmallVector Buffer; Disassembler.getInstruction(PC, Size, Instruction, Buffer); outs() << ""Disassembled instruction: "" << Buffer.data() << ""\n""; }" LLVM,RISCV,159,"const Target *getTarget() { return TargetRegistry::lookupTarget(""riscv32"", ""riscv32""); } const Target *getTarget64() { return TargetRegistry::lookupTarget(""riscv64"", ""riscv64""); } const Target *getTarget16() { return TargetRegistry::lookupTarget(""riscv16"", ""riscv16""); } const Target *getTargetC() { return TargetRegistry::lookupTarget(""riscv_c"", ""riscv_c""); } const Target *getTargetRISCV32Only_16() { return TargetRegistry::lookupTarget(""riscv32_only_16"", ""riscv32_only_16""); } const Target *getTargetRISCV32() { return TargetRegistry::lookupTarget(""riscv32"", ""riscv32""); } const Target *getTargetRISCV64() { return TargetRegistry::lookupTarget(""riscv64"", ""riscv64""); } const Target *getTargetRISCV16() { return TargetRegistry::lookupTarget(""riscv16"", ""riscv16""); } const Target *getTargetRISCV_C() { return TargetRegistry::lookupTarget(""riscv_c"", ""riscv_c""); } const Target *getTargetRISCV32Only_16() { return TargetRegistry::lookupTarget(""riscv32_only_16"", ""riscv32_only_16""); } const Target *getTargetRISCV32() { return TargetRegistry::lookupTarget(""riscv32"", ""riscv32""); }" LLVM,RISCV,160, LLVM,RISCV,161, LLVM,RISCV,162, LLVM,RISCV,163, LLVM,RISCV,164, LLVM,RISCV,165,"const Target *getTarget() { return TargetRegistry::lookupTarget(""riscv32"", ""riscv32""); } const Target *getTarget64() { return TargetRegistry::lookupTarget(""riscv64"", ""riscv64""); } const Target *getTarget16() { return TargetRegistry::lookupTarget(""riscv16"", ""riscv16""); } const Target *getTargetC() { return TargetRegistry::lookupTarget(""riscv_c"", ""riscv_c""); } const Target *getTargetRISCV32Only_16() { return TargetRegistry::lookupTarget(""riscv32_only_16"", ""riscv32_only_16""); } const Target *getTargetRISCV32() { return TargetRegistry::lookupTarget(""riscv32"", ""riscv32""); } const Target *getTargetRISCV64() { return 
TargetRegistry::lookupTarget(""riscv64"", ""riscv64""); } const Target *getTargetRISCV16() { return TargetRegistry::lookupTarget(""riscv16"", ""riscv16""); } const Target *getTargetRISCV_C() { return TargetRegistry::lookupTarget(""riscv_c"", ""riscv_c""); } const Target *getTargetRISCV32Only_16() { return TargetRegistry::lookupTarget(""riscv32_only_16"", ""riscv32_only_16""); } const Target *getTargetRISCV32() { return TargetRegistry::lookupTarget(""riscv32"", ""riscv32""); }" LLVM,RISCV,166," const MCExpr *Value, const MCSubtargetInfo *STI) { if (Inst->getOpcode() == RISCV::WORD || Inst->getOpcode() == RISCV::LONG || Inst->getOpcode() == RISCV::FOURBYTE) { if (Value->getKind() == MCExpr::SymbolRef) { const MCSymbol *Sym = Value->getSymbol(); if (Sym->isMicroMIPS()) { Sym->setMicroMIPS(false); } } } }" LLVM,RISCV,167,"void finish(RISCV::TargetMachine *TM) { RISCV::LiveRange *LR = TM->getLiveRange(); while (LR) { for (RISCV::Instruction *I : LR->getInstructions()) { I->setOperand(0, LR->getRegister()); } LR = LR->getNext(); } }" LLVM,RISCV,168,"Streamer *getStreamer(RISCVTargetMachine *TM) { const Triple &Triple = TM->getTargetTriple(); Streamer *Streamer = new Streamer(Triple); Streamer->setTargetSpecific(RISCV::RISCV, RISCV::RISCV); return Streamer; }" LLVM,RISCV,169,"void reset(Value *V, Instance *I) { if (!V->getType()->isScalarTy()) { return; } RISCVTargetSpecificValue *RISCVValue = V->getRISCVTargetSpecificValue(); if (!RISCVValue) { return; } RISCVValue->reset(); }" LLVM,RISCV,170,"const char *getPassName(const Pass *P) { if (P->getTargetMachine()->getTargetTriple().isRISCV()) { return ""RISCV""; } else { return ""Unknown""; } }" LLVM,RISCV,171,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,172, LLVM,RISCV,173,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,174,"const char *getPassName(const Pass *P) { if (P->getTargetMachine()->getTargetTriple().isRISCV()) { return ""RISCV""; } else { return ""Unknown""; } }" LLVM,RISCV,175,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); MF.emitFunctionBody(); MF.emitFunctionEpilogue(); } LLVM,RISCV,176,"void determineCalleeSaves(MachineFunction &MF) { SmallVector CalleeSavedRegs; MF.getTarget().getCalleeSavedRegs(CalleeSavedRegs); for (MCPhysReg Reg : CalleeSavedRegs) { if (Reg.isFPReg() && Reg.getRegClass() == RISCV::GR32BitRegClass) { MF.addCalleeSavedReg(Reg); } else if (Reg.isFPReg() && Reg.getRegClass() == RISCV::FP64BitRegClass) { MF.addCalleeSavedReg(Reg); } else if (Reg.isGPReg() && Reg.getRegClass() == RISCV::GR32BitRegClass) { MF.addCalleeSavedReg(Reg); } } }" LLVM,RISCV,177,void eliminateCallFramePseudoInstr(MachineFunction &MF) { if (!MF.getTarget().hasCallFrameSetupPseudoInstr() || !MF.getTarget().hasCallFrameDestroyPseudoInstr()) return; for (auto &BB : MF.getBasicBlocks()) { for (auto &I : BB) { if (I.getOpcode() == RISCV::ADJCALLSTACKDOWN || I.getOpcode() == RISCV::sp_64 || I.getOpcode() == RISCV::sp) { I.eraseFromParent(); } } } } 
LLVM,RISCV,178,"void emitEpilogue(LLVMContext& Context, Function& F) { const Target& Target = Context.getTarget(); const RISCVTargetInfo& RISCVTI = Target.getRISCVTargetInfo(); const Register& SP = RISCVTI.getRegisterInfo().getReg(RISCV::SP_64); const Register& FP = RISCVTI.getRegisterInfo().getReg(RISCV::FP_64); const Register& ZERO = RISCVTI.getRegisterInfo().getReg(RISCV::ZERO_64); F.getEntryBlock().getTerminator()->setSuccessor(0, &F.getExitBlock()); F.getExitBlock().getTerminator()->setSuccessor(0, &F.getEntryBlock()); Instruction* I = F.getEntryBlock().getFirstNonPHI(); I->setOpcode(RISCV::ADD64); I->setOperand(0, SP); I->setOperand(1, FP); I->setOperand(2, ZERO); I = F.getExitBlock().getFirstNonPHI(); I->setOpcode(RISCV::ADD); I->setOperand(0, SP); I->setOperand(1, FP); I->setOperand(2, ZERO); Instruction* Ret = F.getExitBlock().getTerminator(); Ret->setOpcode(RISCV::RET); Ret->setOperand(0, SP); }" LLVM,RISCV,179,bool hasFP(Function &F) { if (F.hasFramePointer()) { return true; } if (F.getFramePointer() != F.getStackPointer()) { return true; } return false; } LLVM,RISCV,180,"bool hasReservedCallFrame(Function &F) { if (F.hasFramePointer()) return false; for (auto &BB : F.getBasicBlocks()) { for (auto &I : BB.getInstructions()) { if (I.getOpcode() == Instruction::Call) { CallInst *CI = cast(I); if (CI->getCalledFunction()->hasReservedCallFrame()) return true; } } } return false; } ``` This function takes a `Function` object as an argument and returns a boolean indicating whether the function has a reserved call frame. It checks if the function has a frame pointer and if it has a call site that requires argument space. If either of these conditions is true, it returns true, otherwise it returns false. You can use this function in your RISCV backend of the LLVM Compiler to determine whether a function has a reserved call frame. 
For example, you could use it in the `getFramePointer` function to determine whether to allocate a frame pointer for a function: ``` FramePointer getFramePointer(Function &F) { if (hasReservedCallFrame(F)) return FramePointer::None; else return FramePointer::FP; }" LLVM,RISCV,181,"bool spillCalleeSavedRegisters(MachineFunction &MF) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const TargetRegisterClass *CalleeSavedRegClass = TRI->getCalleeSavedRegClass(); if (CalleeSavedRegClass->isEmpty()) return false; for (const TargetRegisterClass *RegClass : CalleeSavedRegClass->getSubClasses()) { for (unsigned Reg : RegClass->getRegisters()) { const MachineRegisterInfo *MRI = MF.getRegInfo(); const MachineStackSlot *StackSlot = MRI->getStackSlot(Reg); MachineInstr *StoreInstr = MF.createStore(Reg, StackSlot); MF.addInstr(StoreInstr); } } return true; }" LLVM,RISCV,182,bool canUseAsEpilogue(BasicBlock *MBB) { if (MBB->getParent()->getTarget() != RISCV) return false; if (MBB->size() != 1) return false; if (MBB->front()->getOpcode() != RISCV::RET) return false; return true; } LLVM,RISCV,183,bool canUseAsPrologue(MachineBasicBlock *MBB) { if (MBB->getNumPredecessors() != 1) return false; if (MBB->getNumSuccessors() != 1) return false; if (MBB->getPredecessor(0)->getOpcode() != Instruction::Call) return false; if (MBB->getSuccessor(0)->getOpcode() != Instruction::Return) return false; if (MBB->getPredecessor(0)->getOpcode() != Instruction::RISCV) return false; if (MBB->getSuccessor(0)->getOpcode() != Instruction::RISCV) return false; if (MBB->getPredecessor(0)->getOpcode() != Instruction::RISCV::X5) return false; if (MBB->getSuccessor(0)->getOpcode() != Instruction::RISCV::X5) return false; return true; } LLVM,RISCV,184,void determineCalleeSaves(MachineFunction &MF) { const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo(); const TargetRegisterClass *CalleeSavedRegs = TRI->getCalleeSavedRegs(); for (unsigned i = 0; i < CalleeSavedRegs->getNumRegs(); i++) { const TargetRegisterClass *RegClass = CalleeSavedRegs->getRegClass(i); if (RegClass->getRegClass() == RISCV::X1) { MF.addCalleeSavedReg(RegClass); } else if (RegClass->getRegClass() == RISCV::X8) { if (MF.getFramePointer()) { MF.addCalleeSavedReg(RegClass); } } else if (RegClass->getRegClass() == RISCV::X16) { if (MF.getFramePointer() && MF.getTargetTriple().getArch() == Triple::riscv64) { MF.addCalleeSavedReg(RegClass); } } else if (RegClass->getRegClass() == RISCV::X28) { if (MF.getFramePointer() && MF.getTargetTriple().getArch() == Triple::riscv64) { MF.addCalleeSavedReg(RegClass); } LLVM,RISCV,185,void determineFrameLayout(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const FrameLayout::FrameLayoutType FLT = FL.getType(); const FrameLayout::FrameLayoutType FLT_RISCV = FLT.getRISCVType(); if (FLT_RISCV == FrameLayout::RISCV_Stack) { MF.setFrameLayout(FrameLayout::RISCV_Stack); } else if (FLT_RISCV == FrameLayout::RISCV_Register) { MF.setFrameLayout(FrameLayout::RISCV_Register); } else { MF.setFrameLayout(FrameLayout::Unknown); } } LLVM,RISCV,186,"void eliminateCallFramePseudoInstr(LLVMContext& Context, Function& F) { if (!F.hasFnAttribute(Attribute::CallFrameSetup) || !F.hasFnAttribute(Attribute::CallFrameDestroy)) { return; } Instruction* CallFrameSetup = F.getEntryBlock().getFirstNonPHI(); Instruction* CallFrameDestroy = 
F.getExitBlock().getFirstNonPHI(); CallFrameSetup->eraseFromParent(); CallFrameDestroy->eraseFromParent(); F.removeFnAttribute(Attribute::CallFrameSetup); F.removeFnAttribute(Attribute::CallFrameDestroy); }" LLVM,RISCV,187,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,188,static StackID getStackIDForScalableVectors(const Module &M) { const TargetSpecificValue *RISCV = M.getTargetSpecificValue(); if (!RISCV) { return StackID::None; } const StackID *StackID = RISCV->getStackID(); if (!StackID) { return StackID::None; } return *StackID; } LLVM,RISCV,189,bool hasFP(Function &F) { if (F.hasFramePointer()) { return true; } if (F.getFramePointer() != F.getStackPointer()) { return true; } return false; } LLVM,RISCV,190,bool hasReservedCallFrame(Function &F) { if (F.getFramePointer()) { return true; } if (F.getCallSites().empty()) { return false; } for (auto &CS : F.getCallSites()) { if (CS.getArgumentSpace().empty()) { return false; } } return true; } LLVM,RISCV,191,"void processFunctionBeforeFrameFinalized(Function &F, MachineFunction &MF) { const FrameInfo &FI = MF.getFrameInfo(); if (FI.hasFramePointer()) { Register FramePointer = FI.getFramePointer(); if (FramePointer.isGPR()) { GPRRegClass GPR = FramePointer.getGPRRegClass(); if (GPR == RISCV::GPRRegClass::RISCV) { uint64_t TargetSpecificValue = MF.getTargetSpecificValue(); if (TargetSpecificValue == 11) { } } } } }" LLVM,RISCV,192,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { if (!MF.hasCalleeSavedRegs()) return false; SmallVector CalleeSavedRegs; MF.getCalleeSavedRegs(CalleeSavedRegs); BasicBlock *RestoreBB = MF.createBasicBlock(); for (MCRegister Reg : CalleeSavedRegs) { MachineInstr *LoadInstr = MF.createLoadRegFromStackSlot(Reg, MF.getStackPointer(), 0); RestoreBB->addInstruction(LoadInstr); } MF.addBasicBlock(RestoreBB); return true; }" LLVM,RISCV,193,"bool spillCalleeSavedRegisters(RISCV::PseudoCALLReg &CallReg, RISCV::X5 &X5, RISCVII::MO_CALL &MO_CALL) { for (int i = 0; i < CallReg.getNumCalleeSavedRegs(); i++) { RISCV::X5 Reg = CallReg.getCalleeSavedReg(i); if (Reg.isValid()) { MO_CALL.emitStoreRegToStackSlot(Reg, X5.getStackSlot()); } } return true; }" LLVM,RISCV,194, const RegClass &RC) { const RegClass &CalleeSavedRegs = TRI->getCalleeSavedRegs(); for (const RegClass &Reg : CalleeSavedRegs) { if (Reg.isPhysical() && !Reg.isCallerSaved()) { RC.addReg(Reg); } else if (Reg.isVirtual() && !Reg.isCallerSaved()) { RC.addReg(Reg); } } } LLVM,RISCV,195,void determineFrameLayout(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const FrameLayout::FrameLayoutType FLT = FL.getType(); const FrameLayout::FrameLayoutType FLT_RISCV = FLT.getRISCVType(); if (FLT_RISCV == FrameLayout::RISCV_Stack) { MF.setFrameLayout(FrameLayout::RISCV_Stack); } else if (FLT_RISCV == FrameLayout::RISCV_Register) { MF.setFrameLayout(FrameLayout::RISCV_Register); } else { MF.setFrameLayout(FrameLayout::Unknown); } } LLVM,RISCV,196,"void eliminateCallFramePseudoInstr(LLVMContext& Context, Function& F) { if (!F.hasFnAttribute(Attribute::CallFrameSetup) || 
!F.hasFnAttribute(Attribute::CallFrameDestroy)) { return; } Instruction* CallFrameSetup = F.getEntryBlock().getFirstNonPHI(); Instruction* CallFrameDestroy = F.getExitBlock().getFirstNonPHI(); CallFrameSetup->eraseFromParent(); CallFrameDestroy->eraseFromParent(); F.removeFnAttribute(Attribute::CallFrameSetup); F.removeFnAttribute(Attribute::CallFrameDestroy); }" LLVM,RISCV,197,"void emitEpilogue(LLVMContext &Context, Function &F) { if (!F.hasFnAttribute(Attribute::NoFramePointerElim)) { BasicBlock *EpilogBB = BasicBlock::Create(Context, ""epilog"", &F); IRBuilder<> Builder(EpilogBB); Builder.CreateStore(Builder.CreateLoad(F.getArg(0), ""fp""), ""sp""); Builder.CreateStore(Builder.CreateLoad(F.getArg(1), ""sp""), ""fp""); Builder.CreateBr(EpilogBB); } }" LLVM,RISCV,198,"void emitPrologue(LLVMContext &Context, Function &F) { TargetSpecificValue *RISCV = getTargetSpecificValue(Context, ""RISCV""); BasicBlock *PrologBB = BasicBlock::Create(Context, ""prolog"", &F); IRBuilder<> Builder(PrologBB); Builder.CreateBr(RISCV, 0); BasicBlock *EpilogBB = BasicBlock::Create(Context, ""epilog"", &F); Builder.SetInsertPoint(EpilogBB); Builder.CreateBr(RISCV, 0); }" LLVM,RISCV,199,"APInt getFrameIndexReference(Value *V, APInt &Offset) { APInt BaseReg = V->getOperand(0); Offset = V->getOperand(1); if (!BaseReg.isValidRISCVRegister()) { errs() << ""Invalid RISCV register: "" << BaseReg << ""\n""; return APInt(); } if (!Offset.isValidRISCVImmediate()) { errs() << ""Invalid RISCV immediate: "" << Offset << ""\n""; return APInt(); } return BaseReg; }" LLVM,RISCV,200,static StackID getStackIDForScalableVectors(const Module &M) { const TargetSpecificValue *RISCV = M.getTargetSpecificValue(); if (!RISCV) { return StackID::None; } const StackID *StackID = RISCV->getStackID(); if (!StackID) { return StackID::None; } return *StackID; } LLVM,RISCV,201,"void processFunctionBeforeFrameFinalized(Function &F, MachineFunction &MF) { const FrameInfo &FI = MF.getFrameInfo(); const TargetRegisterClass &RegClass = MF.getRegisterClass(); unsigned NumRegs = RegClass.getNumRegs(); unsigned NumUsedRegs = FI.getNumUsedRegs(); unsigned NumAvailRegs = NumRegs - NumUsedRegs; if (NumAvailRegs == 0) return; for (unsigned i = 0; i < NumAvailRegs; i++) { const TargetRegister &Reg = RegClass.getRegister(i); FI.addRegister(Reg); } }" LLVM,RISCV,202,void determineFrameLayout(MachineFunction &MF) { const TargetMachine &TM = MF.getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const unsigned StackAlign = ST.getStackAlign(); const unsigned FramePointerAlign = ST.getFramePointerAlign(); const unsigned StackSize = ST.getStackSize(); const unsigned FrameSize = ST.getFrameSize(); FL.setStackAlign(StackAlign); FL.setFramePointerAlign(FramePointerAlign); FL.setStackSize(StackSize); FL.setFrameSize(FrameSize); } LLVM,RISCV,203,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,204,void processFunctionBeforeFrameFinalized(MachineFunction &MF) { const TargetFrameLowering *TFL = MF.getFrameLowering(); const TargetFrameInfo *TFI = MF.getFrameInfo(); if (TFI->hasFramePointer()) { unsigned FramePointerReg = TFI->getFramePointerReg(); if (FramePointerReg < RISCV::GPRRegClass::FirstReg || 
FramePointerReg > RISCV::GPRRegClass::LastReg) { TFL->setFramePointerReg(RISCV::GPRRegClass::FirstReg); } } if (TFI->hasStackPointer()) { unsigned StackPointerReg = TFI->getStackPointerReg(); if (StackPointerReg < RISCV::GPRRegClass::FirstReg || StackPointerReg > RISCV::GPRRegClass::LastReg) { TFL->setStackPointerReg(RISCV::GPRRegClass::FirstReg); } } LLVM,RISCV,205,bool enableShrinkWrapping(const Target &T) { if (T.getArch() == Triple::riscv) { return true; } return false; } LLVM,RISCV,206,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { if (!MF.hasCalleeSavedRegs()) return false; SmallVector CalleeSavedRegs; MF.getCalleeSavedRegs(CalleeSavedRegs); BasicBlock *RestoreBB = MF.createBasicBlock(); for (MCRegister Reg : CalleeSavedRegs) { MachineInstr *LoadInstr = MF.createLoadRegFromStackSlot(Reg, MF.getStackPointer(), 0); RestoreBB->addInstruction(LoadInstr); } MF.addBasicBlock(RestoreBB); return true; }" LLVM,RISCV,207,"bool spillCalleeSavedRegisters(RISCV::PseudoCALLReg &CallReg, RISCV::X5 &X5, RISCVII::MO_CALL &MO_CALL) { for (int i = 0; i < CallReg.getNumCalleeSavedRegs(); i++) { RISCV::X5 Reg = CallReg.getCalleeSavedReg(i); if (Reg.isValid()) { MO_CALL.emitStoreRegToStackSlot(Reg, X5.getStackSlot()); } } return true; }" LLVM,RISCV,208,void determineFrameLayout(MachineFunction &MF) { const TargetMachine &TM = MF.getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const unsigned StackAlign = ST.getStackAlign(); const unsigned FramePointerAlign = ST.getFramePointerAlign(); const unsigned StackSize = ST.getStackSize(); const unsigned FrameSize = ST.getFrameSize(); FL.setStackAlign(StackAlign); FL.setFramePointerAlign(FramePointerAlign); FL.setStackSize(StackSize); FL.setFrameSize(FrameSize); } LLVM,RISCV,209,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,210,void determineFrameLayout(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const FrameLayout::FrameLayoutType FLT = FL.getType(); const FrameLayout::FrameLayoutType FLT_RISCV = FLT.getRISCVType(); if (FLT_RISCV == FrameLayout::RISCV_Stack) { MF.setFrameLayout(FrameLayout::RISCV_Stack); } else if (FLT_RISCV == FrameLayout::RISCV_Register) { MF.setFrameLayout(FrameLayout::RISCV_Register); } else { MF.setFrameLayout(FrameLayout::Unknown); } } LLVM,RISCV,211,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,212, LLVM,RISCV,213,bool canUseAsEpilogue(BasicBlock *MBB) { if (MBB->getParent()->getTarget() != RISCV) return false; if (MBB->size() != 1) return false; if (MBB->front()->getOpcode() != RISCV::RET) return false; return true; } LLVM,RISCV,214,bool canUseAsPrologue(MachineBasicBlock *MBB) { if (MBB->getNumPredecessors() != 1) return false; if (MBB->getNumSuccessors() != 1) 
return false; if (MBB->getPredecessor(0)->getOpcode() != Instruction::Call) return false; if (MBB->getSuccessor(0)->getOpcode() != Instruction::Return) return false; if (MBB->getPredecessor(0)->getOpcode() != Instruction::RISCV) return false; if (MBB->getSuccessor(0)->getOpcode() != Instruction::RISCV) return false; if (MBB->getPredecessor(0)->getOpcode() != Instruction::RISCV::X5) return false; if (MBB->getSuccessor(0)->getOpcode() != Instruction::RISCV::X5) return false; return true; } LLVM,RISCV,215,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,216," IRBuilder<>& Builder, Value* FrameIndex, const DataLayout& DL) { Value* BaseReg = Builder.CreateLoad(FrameIndex, ""frame_index_base""); APInt Offset = APInt(DL.getPointerSizeInBits(), 0); if (ConstantInt* CI = dyn_cast(FrameIndex)) { Offset = CI->getValue(); } return std::make_pair(BaseReg, Offset); }" LLVM,RISCV,217,"SmallVector determineCalleeSaves(Function &F) { SmallVector calleeSaves; const TargetRegisterInfo *TRI = F.getParent()->getTargetRegisterInfo(); SmallVector calleeSavedRegs = TRI->getCalleeSavedRegs(); for (unsigned i = 0; i < calleeSavedRegs.size(); i++) { unsigned reg = calleeSavedRegs[i]; if (reg == RISCV::X1 || reg == RISCV::X8 || reg == RISCV::X10 || reg == RISCV::X11 || reg == RISCV::X12 || reg == RISCV::X13 || reg == RISCV::X14 || reg == RISCV::X15 || reg == RISCV::X16 || reg == RISCV::X17 || reg == RISCV::X28 || reg == RISCV::X29 || reg == RISCV::X30 || reg == RISCV::X31) { calleeSaves.push_back(reg); } } return calleeSaves; }" LLVM,RISCV,218,"void determineFrameLayout(Function &F, const TargetMachine &TM) { const TargetRegisterClass *GPRRegClass = TM.getRegisterClass(TargetRegisterClass::GPR); unsigned NumGPRRegs = GPRRegClass->getNumRegisters(); SmallVector FrameLayout; for (auto &BB : F.getBasicBlockList()) { for (auto &I : BB) { if (I.getOpcode() == Instruction::Load || I.getOpcode() == Instruction::Store) { const TargetRegisterClass *RegClass = TM.getRegisterClass(I.getOperand(0)); if (RegClass == GPRRegClass) { unsigned RegNum = I.getOperand(0).getReg(); if (RegNum < NumGPRRegs) { FrameLayout.push_back(MachineOperand(RegNum, GPRRegClass)); } } } } }" LLVM,RISCV,219,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,220,"APInt getFrameIndexReference(Module *M, IRBuilder<> &B, Value *Index) { APInt RISCVTargetValue = APInt(32, 0x12345678); Value *BaseReg = B.CreateLoad(RISCV::X2, ""base_reg"", false, false); Value *Offset = B.CreateAdd(B.CreateZExt(Index, Type::getInt32Ty(M->getContext())), B.CreateConstant(APInt(32, 0x100000000))); Value *FrameIndex = B.CreateAdd(BaseReg, Offset); return FrameIndex; }" LLVM,RISCV,221,bool hasFP(Function &F) { if (F.getFramePointer() != nullptr) return true; if (F.getTargetSpecificValue()) return true; if (F.getTargetSpecificValue()) return true; return false; } LLVM,RISCV,222,"void processFunctionBeforeFrameFinalized(Function &F, MachineFunction &MF) { const 
FrameInfo &FI = MF.getFrameInfo(); if (FI.hasFramePointer()) { Register FramePointer = FI.getFramePointer(); if (FramePointer.isGPR()) { GPRRegClass RegClass = FramePointer.getGPRRegClass(); if (RegClass == RISCV::GPRRegClass) { unsigned RegNum = FramePointer.getRegNum(); if (RegNum == 11) { } } } } }" LLVM,RISCV,223,static bool isCheriPureCapABI(const Function &F) { return F.getCallingConv() == CallingConv::RISCVABI && F.getParamAttrs(0).hasAttribute(Attribute::CheriPureCap); } LLVM,RISCV,224, LLVM,RISCV,225,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,226, LLVM,RISCV,227,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { if (MF.getABI() != RISCVABI::isCheriPureCapABI) return false; if (!RISCV::PseudoTAIL) return false; if (!RISCVII::MO_CALL) return false; if (!RISCV::PseudoRET) return false; for (auto &Reg : MF.getRegs()) { if (Reg.isCalleeSaved()) { RISCVII::MO_CALL(Reg, Reg.getStackSlot()); } } return true; }" LLVM,RISCV,228,"bool spillCalleeSavedRegisters(MachineFunction &MF) { if (MF.getABI() != RISCVABI::isCheriPureCapABI) return false; if (MF.getLibCall() == RISCV::SaveRestoreLibCall && MF.getLibCallABI() == RISCVABI::isCheriPureCapABI) return false; for (int i = 0; i < MF.getNumRegs(); i++) { MachineReg Reg = MF.getReg(i); if (Reg.isCalleeSaved()) { MachineInstr *MI = MF.createStore(Reg, MF.getStackSlot(Reg), RISCVII::MO_CALL); MF.addInstruction(MI); } } return true; }" LLVM,RISCV,229," IRBuilder<>& Builder, Value* FrameIndex, const DataLayout& DL) { Value* BaseReg = Builder.CreateLoad(FrameIndex, ""frame_index_base""); APInt Offset = APInt(DL.getPointerSizeInBits(), 0); if (ConstantInt* CI = dyn_cast(FrameIndex)) { Offset = CI->getValue(); } return std::make_pair(BaseReg, Offset); }" LLVM,RISCV,230,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,231,"void emitEpilogue(LLVMContext& Context, Function& F) { auto RISCV = Context.getTarget(); F.getEntryBlock().getInstList().push_back( new ReturnInst(Context, RISCV.getReturnValue(), F.getReturnType())); }" LLVM,RISCV,232,"void emitPrologue(Function &F, const TargetMachine &TM) { const Target &RISCV = TM.getTarget(); const TargetMachine::RISCV &RISCVTM = TM.getRISCV(); BasicBlock *BB = F.getEntryBlock(); Instruction *InsertPt = &BB->front(); IRBuilder<> Builder(InsertPt); Builder.CreateAlloca(RISCV.getPointerSize(), nullptr, ""stack_frame""); Builder.CreateStore(RISCV.getPointerSize(), Builder.CreateConstant(0), ""stack_frame""); Builder.CreateBr(F.getEntryBlock()); }" LLVM,RISCV,233,bool hasFP(Function &F) { if (F.hasFramePointer()) { return true; } if (F.getFramePointer() != F.getStackPointer()) { return true; } return false; } LLVM,RISCV,234,"void emitEpilogue(LLVMContext& Context, Function& F) { BasicBlock* BB = F.getEntryBlock(); Instruction* I = BB->getTerminator(); IRBuilder<> Builder(I); Builder.CreateRet(Builder.CreateLoad(F.getArg(0), ""frame pointer"")); 
Builder.CreateRet(Builder.CreateLoad(F.getArg(1), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(2), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(3), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(4), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(5), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(6), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(7), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(8), ""frame pointer"")); }" LLVM,RISCV,235, LLVM,RISCV,236,"void processFunctionBeforeFrameFinalized(Function &F, MachineFunction &MF) { const FrameInfo &FI = MF.getFrameInfo(); const TargetRegisterClass &RegClass = MF.getRegisterClass(); unsigned NumRegs = RegClass.getNumRegs(); unsigned NumUsedRegs = FI.getNumUsedRegs(); unsigned NumAvailRegs = NumRegs - NumUsedRegs; if (NumAvailRegs == 0) return; for (unsigned i = 0; i < NumAvailRegs; i++) { const TargetRegister &Reg = RegClass.getRegister(i); FI.addRegister(Reg); } }" LLVM,RISCV,237,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,238,"void processFunctionBeforeFrameFinalized(Function &F, MachineFunction &MF) { const FrameInfo *FI = MF.getFrameInfo(); if (FI->hasFramePointer()) { Register FramePointer = FI->getFramePointer(); if (FramePointer.isCalleeSaved()) { MF.insertPrologue(new StoreInst(FramePointer, MF.getStackPointer(), MF.getModule().getDataLayout())); } } if (FI->hasStackPointer()) { Register StackPointer = FI->getStackPointer(); if (StackPointer.isCalleeSaved()) { MF.insertPrologue(new StoreInst(StackPointer, MF.getStackPointer(), MF.getModule().getDataLayout())); } } for (auto &Reg : FI->getCalleeSavedRegisters()) { MF.insertPrologue(new StoreInst(Reg, MF.getStackPointer(), MF.getModule().getDataLayout())); } }" LLVM,RISCV,239, const std::vector &CalleeSavedRegs) { unsigned X1 = TRI->getRegisterNumber(RISCV::X1); unsigned X8 = TRI->getRegisterNumber(RISCV::X8); for (unsigned Reg : CalleeSavedRegs) { if (Reg == X1 || Reg == X8) { TRI->setRegisterSaved(Reg); } else { TRI->setRegisterNotSaved(Reg); } } } LLVM,RISCV,240,"void emitEpilogue(LLVMContext &Context, Function &F) { if (!F.hasFnAttribute(Attribute::NoFramePointerElim)) { BasicBlock *EpilogBB = BasicBlock::Create(Context, ""epilog"", &F); IRBuilder<> Builder(EpilogBB); Builder.CreateStore(Builder.CreateLoad(F.getArg(0), ""fp""), ""sp""); Builder.CreateStore(Builder.CreateLoad(F.getArg(1), ""sp""), ""fp""); Builder.CreateBr(EpilogBB); } }" LLVM,RISCV,241,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { if (!MF.hasCalleeSavedRegs()) return false; SmallVector CalleeSavedRegs; MF.getCalleeSavedRegs(CalleeSavedRegs); BasicBlock *RestoreBB = MF.createBasicBlock(); for (MCRegister Reg : CalleeSavedRegs) { MachineInstr *LoadInstr = MF.createLoadRegFromStackSlot(Reg, MF.getStackPointer(), 0); RestoreBB->addInstruction(LoadInstr); } MF.addBasicBlock(RestoreBB); return true; }" LLVM,RISCV,242, const std::vector &CalleeSavedRegs) { unsigned X1 = TRI->getRegisterNumber(RISCV::X1); unsigned X8 = TRI->getRegisterNumber(RISCV::X8); for (unsigned Reg : CalleeSavedRegs) { if (Reg == X1 || Reg == X8) { TRI->setRegisterSaved(Reg); } else { 
TRI->setRegisterNotSaved(Reg); } } } LLVM,RISCV,243,void determineFrameLayout(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const FrameLayout::FrameLayoutType FLT = FL.getType(); const FrameLayout::FrameLayoutType FLT_RISCV = FLT.getRISCVType(); if (FLT_RISCV == FrameLayout::RISCV_Stack) { MF.setFrameLayout(FrameLayout::RISCV_Stack); } else if (FLT_RISCV == FrameLayout::RISCV_Register) { MF.setFrameLayout(FrameLayout::RISCV_Register); } else { MF.setFrameLayout(FrameLayout::Unknown); } } LLVM,RISCV,244,void emitEpilogue(Function *F) { if (!F->hasFramePointer()) { return; } Register framePointer = F->getFramePointer(); emitRestoreStackPointer(framePointer); emitRestoreFramePointer(framePointer); emitReturn(); } LLVM,RISCV,245,"void emitPrologue(MachineFunction &MF) { const RISCVTarget &RISCV = MF.getTarget(); MachineFrameInfo &MFI = MF.getFrameInfo(); MachineRegisterInfo &MRI = MF.getRegisterInfo(); MachineRegister FramePointer = MRI.getFramePointer(); MF.emitPrologue(RISCV, MFI, FramePointer); }" LLVM,RISCV,246,"APInt getFrameIndexReference(Module *M, StringRef FrameIndex) { APInt FrameIndexValue = M->getTargetSpecificValue(FrameIndex); APInt BaseRegister = FrameIndexValue.getBits(0, 4); APInt Offset = FrameIndexValue.getBits(4, 12); return APInt(12, BaseRegister, Offset); }" LLVM,RISCV,247,"void emitEpilogue(LLVMContext& Context, Function& F) { BasicBlock* BB = F.getEntryBlock(); Instruction* I = BB->getTerminator(); IRBuilder<> Builder(I); Builder.CreateRet(Builder.CreateLoad(F.getArg(0), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(1), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(2), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(3), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(4), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(5), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(6), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(7), ""frame pointer"")); Builder.CreateRet(Builder.CreateLoad(F.getArg(8), ""frame pointer"")); }" LLVM,RISCV,248,"void emitPrologue(Function &F, const TargetMachine &TM) { }" LLVM,RISCV,249,void eliminateCallFramePseudoInstr(MachineFunction &MF) { if (!MF.getTarget().hasCallFrameSetupPseudoInstr() || !MF.getTarget().hasCallFrameDestroyPseudoInstr()) return; for (auto &BB : MF.getBasicBlocks()) { for (auto &I : BB) { if (I.getOpcode() == RISCV::ADJCALLSTACKDOWN || I.getOpcode() == RISCV::sp_64 || I.getOpcode() == RISCV::sp) { I.eraseFromParent(); } } } } LLVM,RISCV,250, LLVM,RISCV,251,"void emitPrologue(LLVMContext &Context, Function &F) { const Target &RISCV = Context.getTarget(); if (!RISCV.hasFeature(""Shrink-wrapping"")) { emitPrologue(Context, F, RISCV); } } void emitPrologue(LLVMContext &Context, Function &F, const Target &RISCV) { }" LLVM,RISCV,252,"void emitEpilogue(MachineFunction &MF) { Register FP = MF.getFramePointer(); Register SP = MF.getStackPointer(); MF.emitInstruction(Instruction::CreateLoad(FP, SP, 0)); MF.emitInstruction(Instruction::CreateStore(SP, FP, 0)); MF.emitInstruction(Instruction::CreateLoad(FP, SP, 8)); MF.emitInstruction(Instruction::CreateStore(SP, FP, 8)); for (int i = 0; i < MF.getNumCalleeSavedRegisters(); i++) { Register Reg = MF.getCalleeSavedRegister(i); MF.emitInstruction(Instruction::CreateLoad(Reg, SP, 
16 + i * 8)); MF.emitInstruction(Instruction::CreateStore(SP, Reg, 16 + i * 8)); }" LLVM,RISCV,253,void emitPrologue(MachineFunction &MF) { const TargetSpecificValue *TSValue = MF.getTargetSpecificValue(); if (!TSValue) return; const Instruction *Prologue = MF.getPrologue(); const Instruction *Epilogue = MF.getEpilogue(); if (!Prologue || !Epilogue) return; for (const Instruction *I = Prologue; I != Epilogue; I = I->getNextNode()) { MF.emitInstruction(I); } for (const Instruction *I = Epilogue; I != nullptr; I = I->getNextNode()) { MF.emitInstruction(I); } } LLVM,RISCV,254, LLVM,RISCV,255,void determineFrameLayout(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTargetMachine *RISCVTM = static_cast(TM); const RISCVSubtarget *RISCVST = RISCVTM->getSubtarget(); const FrameLayout &FL = MF.getFrameLayout(); const FrameLayout::FrameLayoutType FLT = FL.getType(); const FrameLayout::FrameLayoutType FLT_RISCV = FLT.getRISCVType(); if (FLT_RISCV == FrameLayout::RISCV_Stack) { MF.setFrameLayout(FrameLayout::RISCV_Stack); } else if (FLT_RISCV == FrameLayout::RISCV_Register) { MF.setFrameLayout(FrameLayout::RISCV_Register); } else { MF.setFrameLayout(FrameLayout::Unknown); } } LLVM,RISCV,256,"void eliminateCallFramePseudoInstr(Function &F, const Target &T) { if (!T.getTargetMachine().getTarget().hasCallFrameSetupPseudoInstr()) return; for (auto &BB : F) { for (auto &I : BB) { if (I.getOpcode() == Instruction::CallFrameSetup || I.getOpcode() == Instruction::CallFrameDestroy) { I.eraseFromParent(); } } } }" LLVM,RISCV,257,"void emitEpilogue(Function *F, MachineFunction *MF) { BasicBlock *EpilogBB = MF->getEpilogBB(); if (!EpilogBB) return; if (MF->getFramePointerEliminated()) { emitError(""Frame pointer should not have been eliminated""); return; } if (MF->getSecondSPAdjustAmount() <= 0) { emitError(""Second SP adjust amount should be greater than zero""); return; } }" LLVM,RISCV,258," IRBuilder<>& Builder, Value* FrameIndex, const DataLayout& DL) { Value* BaseReg = Builder.CreateLoad(FrameIndex, ""frame_index_base""); APInt Offset = APInt(DL.getPointerSizeInBits(), 0); if (ConstantInt* CI = dyn_cast(FrameIndex)) { Offset = CI->getValue(); } return std::make_pair(BaseReg, Offset); }" LLVM,RISCV,259,bool hasFP(Function &F) { if (F.hasFramePointer()) { return true; } if (F.getFramePointer() != F.getStackPointer()) { return true; } return false; } LLVM,RISCV,260,bool hasReservedCallFrame(Function &F) { if (F.getFramePointer()) { return true; } if (F.getCallSites().empty()) { return false; } for (auto &CS : F.getCallSites()) { if (CS.getArgumentSpace().empty()) { return false; } } return true; } LLVM,RISCV,261,"void processFunctionBeforeFrameFinalized(Function &F, MachineFunction &MF) { const FrameInfo &FI = MF.getFrameInfo(); const TargetRegisterClass &RegClass = MF.getRegisterClass(); unsigned NumRegs = RegClass.getNumRegs(); unsigned NumUsedRegs = FI.getNumUsedRegs(); unsigned NumAvailRegs = NumRegs - NumUsedRegs; if (NumAvailRegs == 0) return; for (unsigned i = 0; i < NumAvailRegs; i++) { const TargetRegister &Reg = RegClass.getRegister(i); FI.addRegister(Reg); } }" LLVM,RISCV,262,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { if (!MF.hasCalleeSavedRegs()) return false; SmallVector CalleeSavedRegs; MF.getCalleeSavedRegs(CalleeSavedRegs); BasicBlock *RestoreBB = MF.createBasicBlock(); for (MCRegister Reg : CalleeSavedRegs) { MachineInstr *LoadInstr = MF.createLoadRegFromStackSlot(Reg, MF.getStackPointer(), 0); RestoreBB->addInstruction(LoadInstr); } 
MF.addBasicBlock(RestoreBB); return true; }" LLVM,RISCV,263,"bool spillCalleeSavedRegisters(RISCV::PseudoCALLReg &CallReg, RISCV::X5 &X5, RISCVII::MO_CALL &MO_CALL) { for (int i = 0; i < CallReg.getNumCalleeSavedRegs(); i++) { RISCV::X5 Reg = CallReg.getCalleeSavedReg(i); if (Reg.isValid()) { MO_CALL.emitStoreRegToStackSlot(Reg, X5.getStackSlot()); } } return true; }" LLVM,RISCV,264,void runOnFunction(Function &F) { const Target &T = F.getParent()->getTarget(); const TargetMachine &TM = F.getParent()->getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); bool hasMaskedGather = false; bool hasMaskedScatter = false; for (auto &BB : F) { for (auto &I : BB) { if (isa(I) && (I.getIntrinsicID() == Intrinsic::masked_gather || I.getIntrinsicID() == Intrinsic::masked_scatter)) { hasMaskedGather = true; hasMaskedScatter = true; break; } } } if (hasMaskedGather || hasMaskedScatter) { } } LLVM,RISCV,265,void runOnFunction(Function &F) { const Target &T = F.getParent()->getTarget(); const TargetMachine &TM = F.getParent()->getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); bool hasMaskedGather = false; bool hasMaskedScatter = false; for (auto &BB : F) { for (auto &I : BB) { if (isa(I) && (I.getIntrinsicID() == Intrinsic::masked_gather || I.getIntrinsicID() == Intrinsic::masked_scatter)) { hasMaskedGather = true; hasMaskedScatter = true; break; } } } if (hasMaskedGather || hasMaskedScatter) { } } LLVM,RISCV,266, LLVM,RISCV,267,"const char *getPassName(const Pass *pass) { if (pass->getPassKind() == RISCV) { return ""RISCV gather/scatter lowering""; } else { return pass->getPassName(); } }" LLVM,RISCV,268,void runOnFunction(Function &F) { const Target &T = F.getParent()->getTarget(); const TargetMachine &TM = F.getParent()->getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); bool hasMaskedGather = false; bool hasMaskedScatter = false; for (auto &BB : F) { for (auto &I : BB) { if (isa(I) && (I.getIntrinsicID() == Intrinsic::masked_gather || I.getIntrinsicID() == Intrinsic::masked_scatter)) { hasMaskedGather = true; hasMaskedScatter = true; break; } } } if (hasMaskedGather || hasMaskedScatter) { } } LLVM,RISCV,269,"Triple merge(const Triple &T1, const Triple &T2) { if (!T1.isRISCV() || !T2.isRISCV()) return Triple(); if (T1.getVSETVLInfo() != T2.getVSETVLInfo()) return Triple(); Triple T = T1; T.setArchName(T2.getArchName()); T.setVendorName(T2.getVendorName()); T.setOSName(T2.getOSName()); T.setEnvironmentName(T2.getEnvironmentName()); T.setVSETVLInfo(T2.getVSETVLInfo()); return T; }" LLVM,RISCV,270,"void runOnMachineFunction(MachineFunction &MF) { const RISCVTargetMachine &TM = MF.getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); MF.emitFunctionBody(); if (MF.getBlockInfo().empty()) { MF.emitWarning(""Expect empty block infos""); } }" LLVM,RISCV,271,"void dump(const char *message) { fprintf(stderr, ""RISCV: %s\n"", message); }" LLVM,RISCV,272,"void print(Value *V, raw_ostream &OS) { if (V->getType()->isPointerTy()) { OS << ""Pointer: ""; printPointer(V, OS); } else if (V->getType()->isIntegerTy()) { OS << ""Integer: ""; printInteger(V, OS); } else if (V->getType()->isFloatTy()) { OS << ""Float: ""; printFloat(V, OS); } else if (V->getType()->isDoubleTy()) { OS << ""Double: ""; printDouble(V, OS); } else if (V->getType()->isVectorTy()) { OS << ""Vector: ""; printVector(V, OS); } else if (V->getType()->isArrayTy()) { OS << ""Array: ""; printArray(V, OS); } else if (V->getType()->isStructTy()) { OS << ""Struct: ""; printStruct(V, OS); } else if 
(V->getType()->isFunctionTy()) { OS << ""Function: ""; printFunction(V, OS); } else { OS << ""Unknown: ""; printUnknown(V, OS); } } void printPointer(Value *V, raw_ostream &OS) { OS << ""Pointer to ""; print(V->getType()->getPointerElementType(), OS); } void printInteger(Value *V, raw_ostream &OS) { OS << ""Integer value: "" << V->getUniqueInteger().getSExtValue(); } void printFloat(Value *V, raw_ostream &OS) { OS << ""Float value: "" << V->getUniqueFloat().getFloatValue(); } void printDouble(Value *V, raw_ostream &OS) { OS << ""Double value: "" << V->getUniqueDouble().getDoubleValue(); }" LLVM,RISCV,273,void runOnMachineFunction(MachineFunction &MF) { } LLVM,RISCV,274,"void intersect(Set *this, Set *other) { Value *unknown = VSETVLIInfo::getUnknown(this->getContext()); Set *intersection = new Set(this->getContext()); for (Set::iterator it = this->begin(), end = this->end(); it != end; ++it) { Value *value = *it; if (other->count(value)) { intersection->insert(value); } } this->clear(); this->insert(intersection->begin(), intersection->end()); delete intersection; }" LLVM,RISCV,275,"const char *getPassName(const Pass *P) { if (P->getTargetMachine()->getTargetTriple().isRISCV()) { return ""RISCV""; } else { return ""Unknown""; } }" LLVM,RISCV,276,"void intersect(Set *this, Set *other) { Value *unknown = VSETVLIInfo::getUnknown(this->getContext()); Set *intersection = new Set(this->getContext(), unknown); for (Set::iterator it = this->begin(); it != this->end(); ++it) { if (other->count(*it) > 0) { intersection->insert(*it); } } this->clear(); this->insert(intersection->begin(), intersection->end()); }" LLVM,RISCV,277,bool isUnknown(MemDepResult result) { if (result.getKind() == MemDepResult::Unknown) { return true; } if (result.getKind() == MemDepResult::Computable && result.getResult() == MemDepResult::Unknown) { return true; } return false; } LLVM,RISCV,278,bool isValid(const RISCV::iterator &it) { return it.isValid(); } LLVM,RISCV,279,"Triple merge(const Triple &T1, const Triple &T2) { if (!T1.isRISCV() || !T2.isRISCV()) return Triple(); if (T1.getVSETVLInfo() != T2.getVSETVLInfo()) return Triple(); Triple T = T1; T.setArchName(T2.getArchName()); T.setVendorName(T2.getVendorName()); T.setOSName(T2.getOSName()); T.setEnvironmentName(T2.getEnvironmentName()); T.setVSETVLInfo(T2.getVSETVLInfo()); return T; }" LLVM,RISCV,280,"void runOnMachineFunction(MachineFunction &MF) { const RISCVTargetMachine &TM = MF.getTargetMachine(); const RISCVSubtarget &ST = TM.getSubtarget(); MF.emitFunctionBody(); if (MF.getBlockInfo().empty()) { MF.emitWarning(""Expect empty block infos""); } }" LLVM,RISCV,281,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,282,"void printRegName(const char *regName) { const Target *target = TargetRegistry::lookupTarget(""riscv"", ""riscv""); const char *regName = target->getRegisterName(regName); outs() << regName << ""\n""; }" LLVM,RISCV,283,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,284,"void applyTargetSpecificCLOption(const char *option, const char *value) { if (strcmp(option, ""riscv"") == 0) { if (strcmp(value, ""no-aliases"") == 0) { setTarget(RISCV, RISCV, ""no-aliases""); } else if (strcmp(value, ""numeric"") == 0) { setTarget(RISCV, RISCV, 
""numeric""); } } }" LLVM,RISCV,285,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,286,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,287,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,288,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,289,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,290,"void applyTargetSpecificCLOption(const char *option, const char *value) { if (strcmp(option, ""riscv"") == 0) { if (strcmp(value, ""no-aliases"") == 0) { setTarget(RISCV, RISCV, ""no-aliases""); } else if (strcmp(value, ""numeric"") == 0) { setTarget(RISCV, RISCV, ""numeric""); } } }" LLVM,RISCV,291,"void printInst(MCInst *Inst, raw_ostream &OS) { OS << Inst->getOpcode(); for (int i = 0; i < Inst->getNumOperands(); i++) { OS << "" "" << Inst->getOperand(i); } OS << "" "" << Inst->getSuffix(); }" LLVM,RISCV,292, LLVM,RISCV,293,"void expandPostRAPseudo(MachineFunction &MF, MachineInstr &MI) { const Target &T = MF.getTarget(); const RISCVTarget &RISCVT = static_cast(T); const MachineInstr &Pseudo = MI; switch (Pseudo.getOpcode()) { case RISCV::Pseudo::ADD: expandADD(MF, MI); break; case RISCV::Pseudo::SUB: expandSUB(MF, MI); break; case RISCV::Pseudo::MUL: expandMUL(MF, MI); break; case RISCV::Pseudo::DIV: expandDIV(MF, MI); break; case RISCV::Pseudo::REM: expandREM(MF, MI); break; default: llvm_unreachable(""Unknown pseudo instruction""); } }" LLVM,RISCV,294, LLVM,RISCV,295,bool isLoadFromStackSlot(const MachineInstr *MI) { if (MI->getOpcode() != RISCV::Load) return false; unsigned DestReg = MI->getOperand(0).getReg(); unsigned StackSlot = MI->getOperand(1).getReg(); if (DestReg >= RISCV::V0 && DestReg <= RISCV::V31) { unsigned PhysReg = MI->getOperand(0).getReg(); if (PhysReg >= RISCV::S0 && PhysReg <= RISCV::S31) { return true; } } return false; } LLVM,RISCV,296,"bool isStoreToStackSlot(const MachineInstr &MI, unsigned &FrameIndex, unsigned &SrcReg) { if (MI.getOpcode() != RISCV::Store || MI.getOperand(0).isStackSlot()) return false; FrameIndex = MI.getOperand(0).getStackSlot().getFrameIndex(); SrcReg = MI.getOperand(1).getReg(); return true; }" LLVM,RISCV,297," RISCV::ADDI64 addi64, RISCV::ADDI addi, RISCV::LI li) { RISCV::GR64BitRegClass destReg = RISCV::GR64BitRegClass(imm); RISCV::GR32BitRegClass srcReg = RISCV::GR32BitRegClass(srcReg); RISCV::zero_64 zero_64 = RISCV::zero_64(srcReg); RISCV::zero zero = RISCV::zero(destReg); RISCV::ADDI64 addi64 = RISCV::ADDI64(zero_64, imm); RISCV::ADDI addi = RISCV::ADDI(zero, imm); RISCV::LI li = RISCV::LI(destReg, addi64); }" LLVM,RISCV,298," const RISCV::StackSlot &StackSlot, unsigned Reg) { unsigned StackFrameIndex = StackSlot.getStackFrameIndex(); const RISCV::RegisterClass &RegClass = RegClass.getRegisterClass(Reg); Context.getRegister(RegClass, 
Reg).loadFromStackFrame(StackFrameIndex); }" LLVM,RISCV,299," RISCVRegister Reg, unsigned int StackFrameIndex) { Value *StackFramePtr = Context.getStackFramePointer(); Value *StackFrameIndexVal = Context.getConstantInt(StackFrameIndex); Value *RegVal = Context.getRegisterValue(RegClass, Reg); Instruction *StoreInst = new StoreInst(RegVal, StackFramePtr, StackFrameIndexVal); Context.insertInstruction(StoreInst); }" LLVM,RISCV,300, LLVM,RISCV,301, const MachineInstr *MIb) { if (!MIa->isLoad() && !MIa->isStore()) return false; if (!MIb->isLoad() && !MIb->isStore()) return false; if (MIa->getMemOperand()->getAddr() != MIb->getMemOperand()->getAddr()) return true; if (MIa->getMemOperand()->getSize() != MIb->getMemOperand()->getSize()) return true; if (MIa->getMemOperand()->getAlignment() != MIb->getMemOperand()->getAlignment()) return true; if (MIa->getMemOperand()->getMemoryRegion() != MIb->getMemOperand()->getMemoryRegion()) return true; return false; } LLVM,RISCV,302,"void buildOutlinedFrame(LLVMContext& Context, Function& F) { auto Target = F.getParent()->getTarget(); auto RISCVTarget = dyn_cast(Target); if (!RISCVTarget) { return; } auto Frame = new Frame(F.getContext(), F.getType()); Frame->setName(""outlined_frame""); F.addFrame(Frame); Frame->setTargetSpecificValue(RISCVTarget, RISCV::X5, RISCV::JALR, RISCV::X0, RISCV::X5, 0); }" LLVM,RISCV,303, LLVM,RISCV,304, SmallVectorImpl &BitFlags) { APInt DirectFlagMask = APInt::getAllOnesValue(MO.getBitWidth()); DirectFlag = MO.getImm() & DirectFlagMask; APInt BitFlagsMask = APInt::getAllOnesValue(MO.getBitWidth()) ^ DirectFlagMask; BitFlags.clear(); for (unsigned i = 0; i < MO.getNumOperands(); ++i) { APInt BitFlag = MO.getOperand(i).getImm() & BitFlagsMask; if (BitFlag != 0) { BitFlags.push_back(BitFlag); } } } LLVM,RISCV,305, LLVM,RISCV,306,"bool getMemOperandWithOffsetWidth(Instruction *I, Value *&Base, int64_t &Offset, unsigned &Width) { if (!I->isMemoryOperation()) return false; Base = I->getOperand(0); Offset = I->getOperand(1); Width = I->getOperand(2); if (Offset->getType()->isIntegerTy()) { int64_t ConstOffset = Offset->getSExtValue(); if (ConstOffset >= 0 && ConstOffset < Width) { return true; } } return false; }" LLVM,RISCV,307,"static const Target TheRISCVTarget = { ""riscv"", ""RISC-V"", RISCV::FeatureStdExtC, RISCV::C_NOP, RISCV::ADDI, RISCV::X0, RISCV::X0, 0 }; const Target *getNop() { return &TheRISCVTarget; }" LLVM,RISCV,308, LLVM,RISCV,309, LLVM,RISCV,310,static const TargetRegisterInfo *getRegisterInfo(const Target &T) { if (T.getArch() == Triple::riscv) { return &RISCVInstrInfo::getRegisterInfo(); } return nullptr; } LLVM,RISCV,311, LLVM,RISCV,312," RISCVCC::CondCode Cond, RISCV::PseudoBR PseudoBR) { if (Cond == RISCVCC::CondCode::Fallthrough) { llvm_unreachable(""insertBranch must not be told to insert a fallthrough""); } if (Cond == RISCVCC::CondCode::TwoComponents) { llvm_unreachable(""RISCV branch conditions have two components!""); } MachineInstr *Branch = new MachineInstr(MBB, TargetMBB, Cond, PseudoBR); MBB.addInstr(Branch); }" LLVM,RISCV,313," const TargetSpecificValue &TS) { if (!MBB->getTerminator()->isUnconditional()) { return; }" LLVM,RISCV,314," Instruction *InsertBefore) { CallInst *Call = CallInst::Create(OutlinedFunc, InsertBefore); Call->setTargetSpecificValue(RISCV, RISCV, RISCV::PseudoCALLReg, RISCV::X5, 0, RISCVII::MO_CALL); InsertBefore->getParent()->getInstList().insert(InsertBefore, Call); }" LLVM,RISCV,315,bool isAsCheapAsAMove(const Instruction *I) { if (I->getOpcode() == Instruction::Move) { return 
true; } if (I->getOpcode() == Instruction::RISCV) { if (I->getOperand(0)->getType()->isDoubleTy() || I->getOperand(0)->getType()->isFloatTy()) { return true; } if (I->getOpcode() == Instruction::ADDI || I->getOpcode() == Instruction::ORI || I->getOpcode() == Instruction::XORI) { return true; } } return false; } LLVM,RISCV,316,"bool isBranchOffsetInRange(uint32_t opcode, int32_t offset) { switch (opcode) { case RISCV::BEQ: case RISCV::BNE: case RISCV::BLT: case RISCV::BGE: case RISCV::BLTU: case RISCV::BGEU: return offset >= -13 && offset <= 13; case RISCV::JAL: case RISCV::PseudoBR: case RISCV::PseudoJump: return offset >= -32 && offset <= 32; default: llvm_unreachable(""Unexpected opcode!""); } }" LLVM,RISCV,317,bool isCopyInstrImpl(const MachineInstr *MI) { if (MI->getOpcode() == RISCV::ADDI) { if (MI->getOperand(0).isReg() && MI->getOperand(1).isReg()) { if (MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { return true; } } } return false; } LLVM,RISCV,318,"bool isFunctionSafeToOutlineFrom(Function *F, const TargetMachine *TM) { if (F->hasSideEffects()) return false; for (auto &I : F->getInstructions()) { if (!TM->isSupported(I.getOpcode())) return false; } for (auto &I : F->getCalls()) { if (!TM->isSupported(I.getCalledFunction()->getOpcode())) return false; } for (auto &I : F->getReferences()) { if (!TM->isSupported(I.getSymbol()->getOpcode())) return false; } for (auto &I : F->getInstructions()) { if (!TM->isSupported(I.getOpcode())) return false; } for (auto &I : F->getCalls()) { if (!TM->isSupported(I.getCalledFunction()->getOpcode())) return false; } for (auto &I : F->getReferences()) { if (!TM->isSupported(I.getSymbol()->getOpcode())) return false; }" LLVM,RISCV,319, LLVM,RISCV,320, LLVM,RISCV,321,bool isStoreToStackSlot(const MachineInstr *MI) { if (MI->getOpcode() != RISCV::STORE) return false; unsigned int SrcReg = MI->getOperand(0).getReg(); unsigned int StackSlot = MI->getOperand(1).getReg(); if (StackSlot >= RISCV::V0 && StackSlot <= RISCV::V31) return false; if (StackSlot >= RISCV::P0 && StackSlot <= RISCV::P31) return false; if (SrcReg >= RISCV::V0 && SrcReg <= RISCV::V31) return false; if (SrcReg >= RISCV::P0 && SrcReg <= RISCV::P31) return false; if (MI->getOperand(1).getReg() == StackSlot) return true; return false; } LLVM,RISCV,322,"void removeBranch(BasicBlock *BB) { Instruction *Term = BB->getTerminator(); if (Term->getOpcode() == Instruction::Br) { BasicBlock *Target = Term->getSuccessor(0); Term->eraseFromParent(); Instruction *NewTerm = new BranchInst(Target, BB); BB->getInstList().push_back(NewTerm); } }" LLVM,RISCV,323,bool reverseBranchCondition(RISCVCC::CondCode cond) { switch (cond) { case RISCVCC::CondCode::EQ: return RISCVCC::CondCode::NE; case RISCVCC::CondCode::NE: return RISCVCC::CondCode::EQ; case RISCVCC::CondCode::LT: return RISCVCC::CondCode::GE; case RISCVCC::CondCode::GE: return RISCVCC::CondCode::LT; case RISCVCC::CondCode::LE: return RISCVCC::CondCode::GT; case RISCVCC::CondCode::GT: return RISCVCC::CondCode::LE; default: return false; } } LLVM,RISCV,324,bool verifyInstruction(const Instruction *Inst) { if (Inst->getOpcode() == RISCVOp::OPERAND_FIRST_RISCV_IMM) { if (Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM2 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM3 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM4 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM5 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM7 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM12 && 
Inst->getOperand(0).getKind() != RISCVOp::OPERAND_SIMM12 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMM20 && Inst->getOperand(0).getKind() != RISCVOp::OPERAND_UIMMLOG2XLEN) { return false; } LLVM,RISCV,325, const char* targetSpecificValue) { if (srcRegClass == RISCV::GPRRegClass::GPR && destRegClass == RISCV::FPR32RegClass::FPR32 && destRegClass == RISCV::FPR64RegClass::FPR64) { } else { } } LLVM,RISCV,326,unsigned getInstSizeInBytes(const MachineInstr *MI) { switch (MI->getOpcode()) { case RISCV::PseudoCALLReg: case RISCV::PseudoCALL: case RISCV::PseudoJump: case RISCV::PseudoTAIL: case RISCV::PseudoLLA: case RISCV::PseudoLA: case RISCV::PseudoLA_TLS_IE: case RISCV::PseudoLA_TLS_GD: return 4; case RISCV::PseudoAtomicLoadNand32: case RISCV::PseudoAtomicLoadNand64: return 8; case RISCV::PseudoMaskedAtomicSwap32: case RISCV::PseudoMaskedAtomicLoadAdd32: case RISCV::PseudoMaskedAtomicLoadSub32: return 12; case RISCV::PseudoMaskedAtomicLoadNand32: return 16; case RISCV::PseudoMaskedAtomicLoadMax32: case RISCV::PseudoMaskedAtomicLoadMin32: return 20; case RISCV::PseudoMaskedAtomicLoadUMax32: case RISCV::PseudoMaskedAtomicLoadUMin32: return 24; case RISCV::PseudoCmpXchg32: case RISCV::PseudoCmpXchg64: return 28; case RISCV::PseudoMaskedCmpXchg32: return 32; default: return ~0U; } } LLVM,RISCV,327, LLVM,RISCV,328, LLVM,RISCV,329," RISCV::PseudoBR Condition) { if (MBB.getTerminator()->getOpcode() == RISCV::PseudoBR) { llvm_unreachable(""insertBranch must not be told to insert a fallthrough""); } if (Condition.getNumOperands() != 2) { llvm_unreachable(""RISCV branch conditions have two components!""); } MachineBasicBlock *NewMBB = new MachineBasicBlock(MBB.getParent()); NewMBB->setName(""branch""); MachineInstr *BranchInstr = new MachineInstr(NewMBB, RISCV::PseudoBR); BranchInstr->addOperand(0, Condition.getOperand(0)); BranchInstr->addOperand(1, Condition.getOperand(1)); BranchInstr->addOperand(2, TargetMBB); NewMBB->addInstr(BranchInstr); MBB.getParent()->addBasicBlock(NewMBB); MBB.setTerminator(BranchInstr); }" LLVM,RISCV,330,"void insertIndirectBranch(BasicBlock *MBB, BasicBlock *NewDestBB) { if (!MBB->getTerminator()->isUnconditional()) { return; } Instruction *BranchInst = MBB->getTerminator(); BasicBlock *IndirectBranchBB = BasicBlock::Create(MBB->getContext(), ""indirect_branch"", MBB->getParent()); Instruction *IndirectBranchInst = BranchInst::Create(IndirectBranchBB, BranchInst->getDebugLoc()); IndirectBranchBB->setTerminator(IndirectBranchInst); MBB->getParent()->addBasicBlock(IndirectBranchBB); MBB->setTerminator(BranchInst); MBB->getParent()->addBasicBlock(NewDestBB); NewDestBB->setTerminator(BranchInst); }" LLVM,RISCV,331,bool isAsCheapAsAMove(const Instruction *I) { if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Sub || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Div || I->getOpcode() == Instruction::Rem) { return true; } if (I->getOpcode() == Instruction::And || I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::Xor) { return true; } if (I->getOpcode() == Instruction::Shl || I->getOpcode() == Instruction::Shr || I->getOpcode() == Instruction::Sar) { return true; } if (I->getOpcode() == Instruction::ADDI || I->getOpcode() == Instruction::ORI || I->getOpcode() == Instruction::XORI) { return true; } if (I->getOpcode() == Instruction::X0) { return true; } return false; } LLVM,RISCV,332, LLVM,RISCV,333,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass reg, int32_t stackSlot, RISCV::LW, 
RISCV::LD, RISCV::FPR32RegClass fpr32RegClass, RISCV::FLW, RISCV::FPR64RegClass fpr64RegClass, RISCV::FLD, const char* errorMessage, int errorCode) { if (!regClass.isValid()) { llvm::report_fatal_error(errorMessage, errorCode); } if (stackSlot < 0 || stackSlot >= 32) { llvm::report_fatal_error(errorMessage, errorCode); } switch (regClass) { case RISCV::GPRRegClass::GPR: emitLW(reg, stackSlot, RISCV::LW); break; case RISCV::GPRRegClass::FPR: emitFLW(reg, stackSlot, RISCV::FLW); break; case RISCV::GPRRegClass::FPR64: emitFLD(reg, stackSlot, RISCV::FLD); break; default: llvm::report_fatal_error(errorMessage, errorCode); } }" LLVM,RISCV,334,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass stackSlot, uint32_t regIndex, uint32_t stackIndex) { if (regClass != RISCV::GPRRegClass) { llvm_unreachable(""Can't store this register to stack slot""); } if (stackSlot != RISCV::GPRRegClass) { llvm_unreachable(""Can't store this register to stack slot""); } uint32_t regValue = regIndex; uint32_t stackValue = stackIndex; asm volatile(""sw %0, %1"" : ""=r""(regValue) : ""r""(stackValue)); }" LLVM,RISCV,335,"void verifyInstruction(const Instruction *Inst) { if (Inst->getOpcode() != RISCVOp::OPERAND_FIRST_RISCV_IMM) { return; } if (Inst->getNumOperands() != RISCVOp::OPERAND_LAST_RISCV_IMM) { report_fatal_error(""Unexpected number of operands""); } for (unsigned i = 0; i < Inst->getNumOperands(); i++) { const Operand *Op = Inst->getOperand(i); if (Op->getType() != RISCVOp::OPERAND_UIMM4 && Op->getType() != RISCVOp::OPERAND_UIMM5 && Op->getType() != RISCVOp::OPERAND_UIMM12 && Op->getType() != RISCVOp::OPERAND_SIMM12 && Op->getType() != RISCVOp::OPERAND_SIMM13_LSB0 && Op->getType() != RISCVOp::OPERAND_UIMM20 && Op->getType() != RISCVOp::OPERAND_SIMM21_LSB0 && Op->getType() != RISCVOp::OPERAND_UIMMLOG2XLEN) { report_fatal_error(""Unexpected operand type""); } }" LLVM,RISCV,336, LLVM,RISCV,337,bool verifyInstruction(const Instruction *Inst) { if (Inst->getOpcode() != RISCV) return false; if (Inst->getNumOperands() != RISCVOp::OPERAND_FIRST_RISCV_IMM) return false; for (int i = 0; i < RISCVOp::OPERAND_LAST_RISCV_IMM; i++) { if (Inst->getOperand(i)->getType() != RISCVOp::OPERAND_UIMM4 && Inst->getOperand(i)->getType() != RISCVOp::OPERAND_UIMM5 && Inst->getOperand(i)->getType() != RISCVOp::OPERAND_UIMM12 && Inst->getOperand(i)->getType() != RISCVOp::OPERAND_SIMM12 && Inst->getOperand(i)->getType() != RISCVOp::OPERAND_UIMM20 && Inst->getOperand(i)->getType() != RISCVOp::OPERAND_UIMMLOG2XLEN) { return false; } } LLVM,RISCV,338, LLVM,RISCV,339, LLVM,RISCV,340,"void copyPhysReg(PhysReg *dst, PhysReg *src) { if (!dst->isValid()) { return; } if (!src->isValid()) { return; } dst->setValue(src->getValue()); }" LLVM,RISCV,341," RegScavenger &RS) { BasicBlock *NewBB = MBB->splitBasicBlock(MBB->getTerminator()); BranchInst *NewBranch = BranchInst::Create(NewBB, MBB->getTerminator()); NewBB->setTerminator(NewBranch); Register Reg = RS.createRegister(RISCV::GPRRegClass); LoadInst *Load = LoadInst::Create(Reg, NewDestBB->getLabel(), NewBB->getTerminator()); BranchInst *NewBranch = BranchInst::Create(NewBB, Load, NewBB->getTerminator()); NewBB->setTerminator(NewBranch); NewBB->setBranchTarget(NewDestBB); MBB->setBranchTarget(NewBB); }" LLVM,RISCV,342,unsigned getInstSizeInBytes(const MachineInstr *MI) { switch (MI->getOpcode()) { case RISCV::PseudoCALL: return 8; default: return ~0U; } } LLVM,RISCV,343,"void insertIndirectBranch(BasicBlock *MBB, BasicBlock *NewDestBB) { Instruction *IP = MBB->getTerminator(); 
BasicBlock *IndirectBB = BasicBlock::Create(MBB->getContext(), ""IndirectBB"", MBB->getParent()); BranchInst *NewBranch = BranchInst::Create(NewDestBB, IndirectBB); NewBranch->setIndirect(true); IP->getParent()->getInstList().push_back(NewBranch); MBB->setBranching(NewBranch); NewDestBB->setBranching(NewBranch); }" LLVM,RISCV,344," const char* targetSpecificValue) { switch (regClass) { case RISCV::GPRRegClass: emitLW(reg, slot, 0); break; case RISCV::FPR32RegClass: emitFLW(reg, slot, 0); break; case RISCV::FPR64RegClass: emitFLD(reg, slot, 0); break; default: assert(0 && ""Can't load this register from stack slot""); break; } }" LLVM,RISCV,345,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::SW stackFrameIndex, RISCV::FPR32RegClass fpr32RegClass, RISCV::FSW fsw, RISCV::FPR64RegClass fpr64RegClass, RISCV::FSD fsd, const char* targetSpecificValue) { if (regClass == RISCV::GPRRegClass::Invalid) { llvm_unreachable(""Invalid register class""); } if (stackFrameIndex == RISCV::SW::Invalid) { llvm_unreachable(""Invalid stack frame index""); } if (fpr32RegClass == RISCV::FPR32RegClass::Invalid) { llvm_unreachable(""Invalid FPR32 register class""); } if (fsw == RISCV::FSW::Invalid) { llvm_unreachable(""Invalid FSW register class""); } if (fpr64RegClass == RISCV::FPR64RegClass::Invalid) { llvm_unreachable(""Invalid FPR64 register class""); } if (fsd == RISCV::FSD::Invalid) { llvm_unreachable(""Invalid FSD register class""); } if (targetSpecificValue == nullptr) { llvm_unreachable(""Invalid target-specific value""); }" LLVM,RISCV,346,"void copyPhysReg(LLVMContext &Context, const RISCV::GPRRegClass &SrcRegClass, const RISCV::GPRRegClass &DstRegClass, const RISCV::Instruction &Inst) { if (SrcRegClass.getReg() == DstRegClass.getReg()) { return; } if (!SrcRegClass.isPhysical()) { return; } if (!DstRegClass.isPhysical()) { return; } if (SrcRegClass.getSize() != DstRegClass.getSize()) { return; } if (SrcRegClass.getRegClass() != RISCV::GPRRegClass) { return; } if (DstRegClass.getRegClass() != RISCV::GPRRegClass) { return; }" LLVM,RISCV,347,unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTarget() == RISCV) { unsigned int size = MI->getSize(); return size; } else { return ~0U; } } LLVM,RISCV,348,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::LW lw, int stackFrameIndex) { RISCV::GPRReg reg = regClass.getRegister(stackFrameIndex); lw.load(reg, stackFrameIndex); }" LLVM,RISCV,349," RISCV::GPRReg reg) { if (!regClass.isValid(reg)) { llvm::errs() << ""Can't store this register to stack slot\n""; return; } int offset = stackFrameIndex * sizeof(RISCV::GPRReg); reg.store(offset); }" LLVM,RISCV,350,unsigned getInstSizeInBytes(const MachineInstr *MI) { if (MI->getOpcode() != RISCV::PseudoCALLReg && MI->getOpcode() != RISCV::PseudoCALL && MI->getOpcode() != RISCV::PseudoTAIL && MI->getOpcode() != RISCV::PseudoLLA && MI->getOpcode() != RISCV::PseudoLA && MI->getOpcode() != RISCV::PseudoLA_TLS_IE && MI->getOpcode() != RISCV::PseudoLA_TLS_GD) { return ~0U; } unsigned size = 0; switch (MI->getOpcode()) { case RISCV::PseudoCALLReg: size = 4; break; case RISCV::PseudoCALL: size = 4; break; case RISCV::PseudoTAIL: size = 4; break; case RISCV::PseudoLLA: size = 4; break; case RISCV::PseudoLA: size = 4; break; case RISCV::PseudoLA_TLS_IE: size = 4; break; case RISCV::PseudoLA_TLS_GD: size = 4; break; } return size; } LLVM,RISCV,351, LLVM,RISCV,352," RISCV::FLD FLD, const char *ErrorMessage) { if (!isValidStackSlot(Context, RegClass, StackFrameIndex)) { 
Context.emitError(ErrorMessage); return; }" LLVM,RISCV,353,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass stackSlot, int32_t offset) { if (regClass == RISCV::GPRRegClass::Invalid) { return; } if (stackSlot == RISCV::GPRRegClass::Invalid) { return; }" LLVM,RISCV,354,"void commuteInstructionImpl(Instruction *I) { unsigned Opcode = I->getOpcode(); Value *Operand0 = I->getOperand(0); Value *Operand1 = I->getOperand(1); if (Opcode == RISCV::Opcode::RISCV) { if (I->hasImmediateValue()) { int ImmediateValue = I->getImmediateValue(); if (ImmediateValue >= 0 && ImmediateValue <= 3) { I->setOperand(0, Operand1); I->setOperand(1, Operand0); I->setOpcode(RISCV::Opcode::RISCV_COMMUTE); } else { llvm_unreachable(""Unexpected opcode index""); } } else { llvm_unreachable(""Unexpected opcode""); } } else { llvm_unreachable(""Unexpected opcode""); } }" LLVM,RISCV,355, LLVM,RISCV,356,unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTargetSpecificValue() == RISCV) { return MI->getSizeInBytes(); } else { return ~0U; } } LLVM,RISCV,357, LLVM,RISCV,358," RegScavenger &RS) { BasicBlock *NewBB = MBB->splitBasicBlock(MBB->getTerminator()); BranchInst *NewBranch = BranchInst::Create(NewBB, MBB->getTerminator()); NewBB->setTerminator(NewBranch); Register Reg = RS.createRegister(RISCV::GPRRegClass); LoadInst *Load = LoadInst::Create(Reg, NewDestBB->getLabel(), NewBB->getTerminator()); BranchInst *NewBranch = BranchInst::Create(NewBB, Load, NewBB->getTerminator()); NewBB->setTerminator(NewBranch); NewBB->setBranchTarget(NewDestBB); MBB->setBranchTarget(NewBB); }" LLVM,RISCV,359," Instruction *InsertBefore) { CallInst *Call = CallInst::Create(OutlinedFunc, InsertBefore); Call->setTargetSpecificValue(RISCV, RISCV, RISCV::PseudoCALLReg, RISCV::X5, 0, RISCVII::MO_CALL); InsertBefore->getParent()->getInstList().insert(InsertBefore, Call); }" LLVM,RISCV,360,bool isAsCheapAsAMove(const Instruction *I) { if (I->getOpcode() == RISCV::MOVE) { return true; } if (I->getOpcode() == RISCV::FSGNJ_D || I->getOpcode() == RISCV::FSGNJ_S || I->getOpcode() == RISCV::FSGNJ_H) { return true; } if (I->getOpcode() == RISCV::ADDI || I->getOpcode() == RISCV::ORI || I->getOpcode() == RISCV::XORI) { return true; } if (I->getOpcode() == RISCV::X0) { return true; } if (I->getNumOperands() == 2) { return true; } if (I->getNumOperands() == 1) { return true; } if (I->getNumOperands() == 0) { return true; } return false; } LLVM,RISCV,361,bool isCopyInstrImpl(const MachineInstr *MI) { if (MI->getOpcode() == RISCV::ADDI) { if (MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { return true; } } if (MI->getOpcode() == RISCV::FSGNJ_D || MI->getOpcode() == RISCV::FSGNJ_S || MI->getOpcode() == RISCV::FSGNJ_H) { if (MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { return true; } } return false; } LLVM,RISCV,362,bool verifyInstruction(const Instruction *Inst) { if (Inst->getOpcode() != RISCVOp::OPERAND_FIRST_RISCV_IMM) { return false; } if (Inst->getNumOperands() != RISCVOp::OPERAND_LAST_RISCV_IMM) { return false; } for (int i = 0; i < Inst->getNumOperands(); i++) { const Operand *Op = Inst->getOperand(i); if (Op->getType() != RISCVOp::OPERAND_UIMM2 && Op->getType() != RISCVOp::OPERAND_UIMM3 && Op->getType() != RISCVOp::OPERAND_UIMM4 && Op->getType() != RISCVOp::OPERAND_UIMM5 && Op->getType() != RISCVOp::OPERAND_UIMM7 && Op->getType() != RISCVOp::OPERAND_UIMM12 && Op->getType() != RISCVOp::OPERAND_SIMM12 && Op->getType() != RISCVOp::OPERAND_UIMM20 && Op->getType() != 
RISCVOp::OPERAND_UIMMLOG2XLEN && Op->getType() != RISCVOp::OPERAND_RVKRNUM) { return false; } } LLVM,RISCV,363, LLVM,RISCV,364,"bool shouldOutlineFromFunctionByDefault(Function *F) { if (F->hasAttribute(""riscv"")) { if (F->hasAttribute(""riscv.outline"")) { if (F->hasAttribute(""riscv.outline.default"")) { return true; } } } return false; }" LLVM,RISCV,365, LLVM,RISCV,366,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass reg, uint32_t stackSlot, RISCV::LW, RISCV::LD, RISCV::FPR32RegClass, RISCV::FLW, RISCV::FPR64RegClass, RISCV::FLD, RISCV::VRRegClass, RISCV::VL1RE8_V, RISCV::VRM2RegClass, RISCV::VL2RE8_V, RISCV::VRM4RegClass, RISCV::VL4RE8_V, RISCV::VRM8RegClass, RISCV::VL8RE8_V, const char* errorMessage, RISCV::GPRRegClass, RISCV::GPRRegClass, uint32_t targetSpecificValue) { if (!regClass.isValidReg(reg)) { throw std::runtime_error(errorMessage); }" LLVM,RISCV,367,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass reg, uint32_t stackFrameIndex, RISCV::SW sw, RISCV::SD sd, RISCV::FPR32RegClass fpr32, RISCV::FSW fsw, RISCV::FPR64RegClass fpr64, RISCV::FSD fsd, RISCV::VRRegClass vr, RISCV::VS1R_V vs1r_v, RISCV::VRM2RegClass vrm2, RISCV::VS2R_V vs2r_v, RISCV::VRM4RegClass vrm4, RISCV::VS4R_V vs4r_v, RISCV::VRM8RegClass vrm8, RISCV::VS8R_V vs8r_v, const char* errorMessage) { if (!regClass.isValid(reg)) { llvm::errs() << errorMessage << ""\n""; return; }" LLVM,RISCV,368," const RISCVTargetMachine& TM) { static const char* flags[] = { ""riscv-call"", ""riscv-plt"", ""riscv-lo"", ""riscv-hi"", ""riscv-pcrel-lo"", ""riscv-pcrel-hi"", ""riscv-got-hi"", ""riscv-tprel-lo"", ""riscv-tprel-hi"", ""riscv-tprel-add"", ""riscv-tls-got-hi"", ""riscv-tls-gd-hi"", ""riscv-captab-pcrel-hi"", ""riscv-tprel-cincoffset"", ""riscv-tls-ie-captab-pcrel-hi"", ""riscv-tls-gd-captab-pcrel-hi"", ""riscv-ccall"" }; return flags; }" LLVM,RISCV,369," RISCV::BranchCondition Condition) { if (!RISCV::isValidBranchCondition(Condition)) { report_fatal_error(""Invalid branch condition""); } MachineInstr *BranchInstr = MBB.getTerminator(); if (BranchInstr->isFallthrough()) { report_fatal_error(""insertBranch must not be told to insert a fallthrough""); } RISCV::BranchCondition Condition1 = RISCV::getBranchCondition1(Condition); RISCV::BranchCondition Condition2 = RISCV::getBranchCondition2(Condition); if (Condition1 != RISCV::BranchCondition::None && Condition2 != RISCV::BranchCondition::None) { report_fatal_error(""RISCV branch conditions have two components!""); } MachineBasicBlock *TargetBB = TargetMBB.getParent(); if (!RISCVABI::isCheriPureCapABI(TargetBB)) { report_fatal_error(""Branch target is not a valid RISCV ABI""); }" LLVM,RISCV,370, LLVM,RISCV,371,bool isAsCheapAsAMove(Instruction *I) { if (I->getOpcode() == Instruction::Move) { return true; } if (I->getOpcode() == Instruction::RISCV) { if (I->getOperand(0)->getOpcode() == Instruction::CMove) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::CIncOffset) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::X0) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::C0) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::CIncOffsetImm) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::FSGNJ_D) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::FSGNJ_S) { return true; } if (I->getOperand(0)->getOpcode() == Instruction::ADDI) { return true; } LLVM,RISCV,372,"bool isBranchOffsetInRange(uint32_t opcode, int32_t offset) { switch 
(opcode) { case RISCV::BEQ: case RISCV::BNE: case RISCV::BLT: case RISCV::BGE: case RISCV::BLTU: case RISCV::BGEU: return offset >= -13 && offset <= 13; case RISCV::JAL: case RISCV::CJAL: case RISCV::PseudoBR: case RISCV::PseudoCBR: return offset >= -21 && offset <= 21; case RISCV::PseudoJump: case RISCV::PseudoCJump: return offset >= -32 && offset <= 32; default: return false; } }" LLVM,RISCV,373, LLVM,RISCV,374,bool isStoreToStackSlot(const MachineInstr &MI) { if (MI.getOpcode() == RISCV::SB || MI.getOpcode() == RISCV::SH || MI.getOpcode() == RISCV::SW || MI.getOpcode() == RISCV::FSH || MI.getOpcode() == RISCV::FSW || MI.getOpcode() == RISCV::SD || MI.getOpcode() == RISCV::FSD || MI.getOpcode() == RISCV::SC_64 || MI.getOpcode() == RISCV::SC_128 || MI.getOpcode() == RISCV::CSB || MI.getOpcode() == RISCV::CSH || MI.getOpcode() == RISCV::CSW || MI.getOpcode() == RISCV::CFSW || MI.getOpcode() == RISCV::CSD || MI.getOpcode() == RISCV::CFSD || MI.getOpcode() == RISCV::CSC_64 || MI.getOpcode() == RISCV::CSC_128) { if (MI.getOperand(0).isStackSlot()) { return true; } } return false; } LLVM,RISCV,375,unsigned getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTarget() != RISCV) { return ~0U; } unsigned size = 0; switch (MI->getOpcode()) { case RISCV::PseudoCALLReg: case RISCV::PseudoCALL: case RISCV::PseudoJump: case RISCV::PseudoTAIL: case RISCV::PseudoLLA: case RISCV::PseudoLA: case RISCV::PseudoLA_TLS_IE: case RISCV::PseudoLA_TLS_GD: size = 4; break; case RISCV::PseudoLA_TLS_LE: size = MI->getTLSModel() == RISCV::TLSModel::InitialExec ? 4 : 8; break; default: return ~0U; } return size; } LLVM,RISCV,376, LLVM,RISCV,377, LLVM,RISCV,378,bool reverseBranchCondition(RISCV::HwlpBranch *branch) { RISCV::HwlpBranch::Condition condition = branch->getCondition(); switch (condition) { case RISCV::HwlpBranch::Condition::Equal: condition = RISCV::HwlpBranch::Condition::NotEqual; break; case RISCV::HwlpBranch::Condition::NotEqual: condition = RISCV::HwlpBranch::Condition::Equal; break; case RISCV::HwlpBranch::Condition::LessThan: condition = RISCV::HwlpBranch::Condition::GreaterThanOrEqual; break; case RISCV::HwlpBranch::Condition::LessThanOrEqual: condition = RISCV::HwlpBranch::Condition::GreaterThan; break; case RISCV::HwlpBranch::Condition::GreaterThan: condition = RISCV::HwlpBranch::Condition::LessThanOrEqual; break; case RISCV::HwlpBranch::Condition::GreaterThanOrEqual: condition = RISCV::HwlpBranch::Condition::LessThan; break; default: return true; } branch->setCondition(condition); return false; } LLVM,RISCV,379,"void verifyInstruction(const Instruction *Inst) { switch (Inst->getOpcode()) { case RISCV::OPERAND_FIRST_RISCV_IMM: case RISCV::OPERAND_LAST_RISCV_IMM: if (!isValidImmediate(Inst->getOperand(0))) { report_fatal_error(""Unexpected operand type""); } break; case RISCV::OPERAND_UIMM1: case RISCV::OPERAND_UIMM4: case RISCV::OPERAND_UIMM5: case RISCV::OPERAND_UIMM12: case RISCV::OPERAND_SIMM12: case RISCV::OPERAND_SIMM13_LSB0: case RISCV::OPERAND_UIMM20: case RISCV::OPERAND_SIMM21_LSB0: case RISCV::OPERAND_UIMMLOG2XLEN: if (!isValidImmediate(Inst->getOperand(0))) { report_fatal_error(""Invalid immediate""); } break; default: break; } }" LLVM,RISCV,380,"void buildOutlinedFrame(LLVMContext &Context, Function *F) { const Target &RISCVTarget = Context.getTarget(); Frame *Frame = new Frame(RISCVTarget); Frame->setName(""outlined_frame""); Frame->setType(RISCVTarget.getFrameType()); Frame->setAlignment(RISCVTarget.getFrameAlignment()); Frame->setSize(RISCVTarget.getFrameSize()); 
Frame->setOffset(RISCVTarget.getFrameOffset()); Frame->setLinkage(RISCVTarget.getFrameLinkage()); Frame->setVisibility(RISCVTarget.getFrameVisibility()); Frame->setSection(RISCVTarget.getFrameSection()); Frame->setAlignment(RISCVTarget.getFrameAlignment()); Frame->setPadding(RISCVTarget.getFramePadding()); Frame->setMetadata(RISCVTarget.getFrameMetadata()); F->addFrame(Frame); }" LLVM,RISCV,381, LLVM,RISCV,382,unsigned getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTargetSpecificValue() == RISCV) { unsigned size = MI->getSizeInBytes(); if (MI->isPseudoCall() || MI->isPseudoTail()) { size = MI->getPseudoCallSizeInBytes(); } if (MI->isPseudoLoad() || MI->isPseudoStore()) { size = MI->getPseudoLoadStoreSizeInBytes(); } if (MI->isPseudoBranch()) { size = MI->getPseudoBranchSizeInBytes(); } return size; } return ~0U; } LLVM,RISCV,383,bool isLoadFromStackSlot(const MachineInstr *MI) { if (MI->getOpcode() == RISCV::LB || MI->getOpcode() == RISCV::LBU || MI->getOpcode() == RISCV::LH || MI->getOpcode() == RISCV::LHU || MI->getOpcode() == RISCV::LW || MI->getOpcode() == RISCV::FLW || MI->getOpcode() == RISCV::LWU || MI->getOpcode() == RISCV::LD || MI->getOpcode() == RISCV::FLD || MI->getOpcode() == RISCV::VLE_V_um || MI->getOpcode() == RISCV::VLE_V_um) { if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg() == RISCV::SP) { return MI->getOperand(1).getReg(); } } return false; } LLVM,RISCV,384, LLVM,RISCV,385," const RISCV::GPRRegClass &Reg) { switch (RegClass) { case RISCV::GPRRegClass::GPR: Context.emit(RISCV::LW, Reg, StackFrameIndex); break; case RISCV::GPRRegClass::FPR: Context.emit(RISCV::FLW, Reg, StackFrameIndex); break; case RISCV::GPRRegClass::VGR: Context.emit(RISCV::VLE_V_um, Reg, StackFrameIndex); break; default: llvm_unreachable(""Can't load this register from stack slot""); } }" LLVM,RISCV,386,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass reg, int32_t stackSlot, RISCV::SW sw, RISCV::SD sd, RISCV::FPR32RegClass fpr32, RISCV::FSW fsw, RISCV::FPR64RegClass fpr64, RISCV::FSD fsd, RISCV::VGRRegClass vgr, RISCV::VPRRegClass vpr, RISCV::VQRRegClass vqr, RISCV::VORRegClass vor, RISCV::VSE_V_um vse_v_um) { if (!regClass.isValid(reg)) { return; }" LLVM,RISCV,387,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::LW lw, int stackFrameIndex) { RISCV::GPRReg reg = regClass.getRegister(stackFrameIndex); lw.load(reg, stackFrameIndex); }" LLVM,RISCV,388," RISCV::GPRReg reg) { if (!regClass.isValid(reg)) { llvm::errs() << ""Can't store this register to stack slot\n""; return; } int offset = stackFrameIndex * sizeof(RISCV::GPRReg); reg.store(offset); }" LLVM,RISCV,389,unsigned getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTarget() != RISCV) return ~0U; if (MI->isPseudo()) { if (MI->getOpcode() == RISCV::PseudoCALL || MI->getOpcode() == RISCV::PseudoTAIL) return 8; if (MI->getOpcode() == RISCV::PseudoLLA) return 8; } return MI->getSize(); } LLVM,RISCV,390,"void insertIndirectBranch(BasicBlock *MBB, BasicBlock *NewDestBB) { if (!MBB->getTerminator()->isUnconditional()) { return; } Instruction *BranchInst = MBB->getTerminator(); BasicBlock *IndirectBranchBB = BasicBlock::Create(MBB->getContext(), ""indirect_branch"", MBB->getParent()); Instruction *IndirectBranchInst = BranchInst::Create(IndirectBranchBB, BranchInst->getDebugLoc()); IndirectBranchBB->setTerminator(IndirectBranchInst); MBB->getParent()->addBasicBlock(IndirectBranchBB); MBB->setTerminator(BranchInst); MBB->getParent()->addBasicBlock(NewDestBB); 
NewDestBB->setTerminator(BranchInst); }" LLVM,RISCV,391,"bool getMemOperandWithOffsetWidth(Instruction *I, Value *&Base, int64_t &Offset, int &Width) { if (!I->isMemoryOperation()) return false; Base = I->getOperand(0); Offset = I->getOperand(1); Width = I->getMemoryWidth(); return true; }" LLVM,RISCV,392," const RISCV::FSGNJ_S &DstInst, const char *TargetSpecificValue) { if (SrcRegClass.getReg() == DstRegClass.getReg()) { return; } if (!SrcRegClass.isPhysical()) { return; } if (!DstRegClass.isPhysical()) { return; } if (SrcRegClass.getRegClass() != DstRegClass.getRegClass()) { return; } if (SrcRegClass.isFloatingPoint()) { return; }" LLVM,RISCV,393,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::LW loadOp, RISCV::FPR32RegClass fpRegClass, RISCV::FLW fpLoadOp, const char* errorMsg, int stackSlot) { if (regClass != RISCV::GPRRegClass && regClass != RISCV::FPR32RegClass) { llvm_unreachable(errorMsg); } if (regClass == RISCV::GPRRegClass) { loadOp(stackSlot); } else { fpLoadOp(stackSlot); } }" LLVM,RISCV,394,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::SW stackFrameIndex, RISCV::FPR32RegClass fprClass, RISCV::FSW fsw, const char* errorMessage) { if (regClass != RISCV::GPRRegClass::GPR) { llvm::errs() << errorMessage << ""\n""; return; } int regNum = regClass.getRegisterNumber(); if (fprClass != RISCV::FPR32RegClass::FPR32) { llvm::errs() << errorMessage << ""\n""; return; } int fprNum = fprClass.getRegisterNumber(); if (stackFrameIndex < 0 || stackFrameIndex >= RISCV::SW::getStackFrameSize()) { llvm::errs() << errorMessage << ""\n""; return; } RISCV::SW::setStackFrameValue(stackFrameIndex, regNum, fprNum); }" LLVM,RISCV,395,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass srcRegClass, uint32_t stackSlot, RISCV::LW, RISCV::LD, RISCV::GPRV64I8RegClass, RISCV::LDVI8, RISCV::GPRV64I16RegClass, RISCV::LDVI16, RISCV::GPRV64I32RegClass, RISCV::LDVI32, RISCV::FPR32RegClass, RISCV::FLW, RISCV::FPR64RegClass, RISCV::FLD, const char* targetSpecificValue) { uint32_t reg = regClass.getRegister(srcRegClass); uint32_t stackOffset = stackSlot * 4; uint32_t value = 0; switch (regClass) { case RISCV::GPRV64I8RegClass: value = load(reg, stackOffset, RISCV::LDVI8); break; case RISCV::GPRV64I16RegClass: value = load(reg, stackOffset, RISCV::LDVI16); break; case RISCV::GPRV64I32RegClass: value = load(reg, stackOffset, RISCV::LDVI32); break; case RISCV::FPR32RegClass: value = load(reg, stackOffset, RISCV::FLW); break; case RISCV::FPR64RegClass: value = load(reg, stackOffset, RISCV::FLD); break; default: break; }" LLVM,RISCV,396,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass reg, unsigned int stackFrameIndex) { if (!regClass.isValidRegister(reg)) { return; } unsigned int stackFrameOffset = getStackFrameOffset(regClass, stackFrameIndex); storeRegToStackFrameOffset(reg, stackFrameOffset); } void storeRegToStackFrameOffset(RISCV::GPRRegClass reg, unsigned int stackFrameOffset) { switch (reg.getRegisterClass()) { case RISCV::GPRV64I8RegClass: storeRegToStackFrameOffset(reg, stackFrameOffset); break; case RISCV::GPRV64I16RegClass: storeRegToStackFrameOffset(reg, stackFrameOffset); break; case RISCV::GPRV64I32RegClass: storeRegToStackFrameOffset(reg, stackFrameOffset); break; case RISCV::FPR32RegClass: storeRegToStackFrameOffset(reg, stackFrameOffset); break; case RISCV::FPR64RegClass: storeRegToStackFrameOffset(reg, stackFrameOffset); break; default: return; } }" LLVM,RISCV,397,unsigned getInstSizeInBytes(const MachineInstr *MI) { if 
(MI->getTargetSpecificValue() == RISCV) { if (MI->isPseudo()) { if (MI->isPseudoCALL() || MI->isPseudoTAIL()) { return 4; } else if (MI->isPseudoLLA() || MI->isPseudoLA()) { return 4; } } else { return MI->getSizeInBytes(); } } return ~0U; } LLVM,RISCV,398,unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTargetSpecificValue() == RISCV::PseudoCALL || MI->getTargetSpecificValue() == RISCV::PseudoTAIL) { return MI->getSizeInBytes(); } return ~0U; } LLVM,RISCV,399,bool isCopyInstrImpl(const MachineInstr *MI) { if (MI->getOpcode() == RISCV::ADDI) { if (MI->getOperand(0).isReg() && MI->getOperand(1).isReg()) { if (MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { return true; } } } return false; } LLVM,RISCV,400, LLVM,RISCV,401,"void expandPostRAPseudo(MachineFunction &MF, MachineInstr &MI) { const Target &T = MF.getTarget(); const RISCVTarget &RISCVT = static_cast(T); const MachineInstr &Pseudo = MI; switch (Pseudo.getOpcode()) { case RISCV::Pseudo::ADD: expandADD(MF, MI); break; case RISCV::Pseudo::SUB: expandSUB(MF, MI); break; case RISCV::Pseudo::MUL: expandMUL(MF, MI); break; case RISCV::Pseudo::DIV: expandDIV(MF, MI); break; case RISCV::Pseudo::REM: expandREM(MF, MI); break; default: llvm_unreachable(""Unknown pseudo instruction""); } }" LLVM,RISCV,402,bool isLoadFromStackSlot(const MachineInstr *MI) { if (MI->getOpcode() != RISCV::Load) return false; unsigned DestReg = MI->getOperand(0).getReg(); unsigned StackSlot = MI->getOperand(1).getReg(); if (DestReg >= RISCV::V0 && DestReg <= RISCV::V31) { unsigned PhysReg = MI->getOperand(0).getReg(); if (PhysReg >= RISCV::S0 && PhysReg <= RISCV::S31) { return true; } } return false; } LLVM,RISCV,403,"bool isStoreToStackSlot(const MachineInstr &MI, unsigned &FrameIndex, unsigned &SrcReg) { if (MI.getOpcode() != RISCV::Store || MI.getOperand(0).isStackSlot()) return false; FrameIndex = MI.getOperand(0).getStackSlot().getFrameIndex(); SrcReg = MI.getOperand(1).getReg(); return true; }" LLVM,RISCV,404,"bool isBranchOffsetInRange(const MachineInstr *MI, int64_t Offset) { const MachineInstr *Branch = MI->getOperand(0).getInst(); unsigned Opcode = Branch->getOpcode(); if (Opcode != RISCV::BEQ && Opcode != RISCV::BNE && Opcode != RISCV::BLT && Opcode != RISCV::BGE && Opcode != RISCV::BLTU && Opcode != RISCV::BGEU && Opcode != RISCV::P_BEQIMM && Opcode != RISCV::P_BNEIMM && Opcode != RISCV::JAL && Opcode != RISCV::PseudoBR && Opcode != RISCV::LoopBranch) { return false; } int64_t Target = Branch->getOperand(0).getImm(); if (Target < 0 || Target >= 13) { return false; } return true; }" LLVM,RISCV,405,"bool reverseBranchCondition(LLVMContext &Context, const Instruction *Inst) { if (Inst->getOpcode() != Instruction::Br) return false; const Value *Cond = Inst->getOperand(0); if (Cond->getType()->isIntegerTy()) { const Value *NewCond = ConstantInt::get(Context, ~Cond->getUniqueInteger()); Inst->setOperand(0, NewCond); return true; } if (Cond->getType()->isRegisterTy()) { const Value *NewCond = ConstantInt::get(Context, ~Cond->getUniqueInteger()); Inst->setOperand(0, NewCond); return true; } if (Cond->getType()->isMemoryTy()) { const Value *NewCond = ConstantInt::get(Context, ~Cond->getUniqueInteger()); Inst->setOperand(0, NewCond); return true; } if (Cond->getType()->isFunctionTy()) { const Value *NewCond = ConstantInt::get(Context, ~Cond->getUniqueInteger()); Inst->setOperand(0, NewCond); return true; } if (Cond->getType()->isVectorTy()) { const Value *NewCond = ConstantInt::get(Context, ~Cond->getUniqueInteger()); 
Inst->setOperand(0, NewCond); return true; }" LLVM,RISCV,406,"void loadRegFromStackSlot(RegisterClass regClass, unsigned reg, unsigned stackSlot) { if (!isValidReg(regClass, reg)) { report_fatal_error(""Invalid register for loadRegFromStackSlot""); } switch (regClass) { case RISCV::GPRRegClass: loadRegFromStackSlot_GPR(reg, stackSlot); break; case RISCV::FPR32RegClass: loadRegFromStackSlot_FPR32(reg, stackSlot); break; case RISCV::FPR64RegClass: loadRegFromStackSlot_FPR64(reg, stackSlot); break; default: report_fatal_error(""Can't load this register from stack slot""); } } void loadRegFromStackSlot_GPR(unsigned reg, unsigned stackSlot) { emit_load(reg, stackSlot, RISCV::LD); } void loadRegFromStackSlot_FPR32(unsigned reg, unsigned stackSlot) { emit_load(reg, stackSlot, RISCV::FLW); } void loadRegFromStackSlot_FPR64(unsigned reg, unsigned stackSlot) { emit_load(reg, stackSlot, RISCV::FLD); }" LLVM,RISCV,407,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::SD stackFrameIndex, RISCV::SW reg) { if (regClass != RISCV::GPRRegClass) { throw ""Can't store this register to stack slot""; } stackFrameIndex.store(reg); }" LLVM,RISCV,408,unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (MI->isPseudo()) { if (MI->getOpcode() == RISCV::PseudoCALL || MI->getOpcode() == RISCV::PseudoTAIL || MI->getOpcode() == RISCV::PseudoLLA) { return MI->getSizeInBytes(); } } return ~0U; } LLVM,RISCV,409,bool analyzeBranch(BasicBlock *MBB) { Instruction *Term = MBB->getTerminator(); if (Term->getOpcode() != Instruction::Br) { return false; } BasicBlock *Target = Term->getSuccessor(0); if (!Target || !Target->isValid()) { return false; } if (Target->getSpecificValue() == RISCV_SPECIFIC_VALUE) { return true; } return false; } LLVM,RISCV,410, const char* targetSpecificValue) { if (srcRegClass == RISCV::GPRRegClass::GPR && destRegClass == RISCV::FPR32RegClass::FPR32 && destRegClass == RISCV::FPR64RegClass::FPR64) { } else { } } LLVM,RISCV,411,unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (MI->getTargetSpecificValue() == RISCV::PseudoCALL || MI->getTargetSpecificValue() == RISCV::PseudoTAIL) { return MI->getSizeInBytes(); } return ~0U; } LLVM,RISCV,412,"void insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TargetMBB) { if (TargetMBB == MBB.getFallthroughBlock()) { MBB.insertBranch(TargetMBB, MachineBasicBlock::BRANCH_FALLTHROUGH); return; } if (TargetMBB == MBB) { MBB.insertBranch(TargetMBB, MachineBasicBlock::BRANCH_SELF); return; } if (TargetMBB->isBranchTarget()) { MBB.insertBranch(TargetMBB, MachineBasicBlock::BRANCH_TARGET); return; } MBB.insertBranch(TargetMBB, MachineBasicBlock::BRANCH_FALLTHROUGH); }" LLVM,RISCV,413,"void insertIndirectBranch(BasicBlock *MBB, BasicBlock *NewDestBB) { if (!MBB->getTerminator()->isUnconditional()) { return; } Instruction *BranchInst = MBB->getTerminator(); BasicBlock *IndirectBranchBB = BasicBlock::Create(MBB->getContext(), ""indirect_branch"", MBB->getParent()); Instruction *IndirectBranchInst = BranchInst::Create(IndirectBranchBB, BranchInst->getDebugLoc()); IndirectBranchBB->setTerminator(IndirectBranchInst); MBB->getParent()->addBasicBlock(IndirectBranchBB); MBB->setTerminator(BranchInst); MBB->getParent()->addBasicBlock(NewDestBB); NewDestBB->setTerminator(BranchInst); }" LLVM,RISCV,414,"bool isBranchOffsetInRange(uint32_t opcode, int32_t offset) { switch (opcode) { case RISCV::BEQ: case RISCV::BNE: case RISCV::BLT: case RISCV::BGE: case RISCV::BLTU: case RISCV::BGEU: return offset >= -13 && offset <= 13; case RISCV::JAL: case 
RISCV::PseudoBR: return offset >= -21 && offset <= 21; default: llvm_unreachable(""Unexpected opcode!""); } }" LLVM,RISCV,415, LLVM,RISCV,416, LLVM,RISCV,417,"void loadRegFromStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass reg, int32_t stackSlot, RISCV::LW, RISCV::LD, RISCV::FPR32RegClass fpr32RegClass, RISCV::FLW, RISCV::FPR64RegClass fpr64RegClass, RISCV::FLD, const char* errorMessage, int errorCode) { if (!regClass.isValid()) { llvm::report_fatal_error(errorMessage, errorCode); } if (stackSlot < 0 || stackSlot >= 32) { llvm::report_fatal_error(errorMessage, errorCode); } switch (regClass) { case RISCV::GPRRegClass::GPR: emitLW(reg, stackSlot, RISCV::LW); break; case RISCV::GPRRegClass::FPR: emitFLW(reg, stackSlot, RISCV::FLW); break; case RISCV::GPRRegClass::FPR64: emitFLD(reg, stackSlot, RISCV::FLD); break; default: llvm::report_fatal_error(errorMessage, errorCode); } }" LLVM,RISCV,418,"void removeBranch(BasicBlock *BB) { Instruction *Term = BB->getTerminator(); if (Term->getOpcode() == Instruction::Br) { BasicBlock *Target = Term->getSuccessor(0); Term->eraseFromParent(); Instruction *NewTerm = new BranchInst(Target, BB); BB->getInstList().push_back(NewTerm); } }" LLVM,RISCV,419,"bool reverseBranchCondition(LLVMContext &Context, const Instruction *Inst) { const Value *Condition = Inst->getOperand(0); if (isa(Condition)) { return true; } Type *ConditionType = Condition->getType(); if (ConditionType->isBooleanTy()) { return false; } if (ConditionType->isIntegerTy()) { return false; } if (ConditionType->isFloatingPointTy()) { return true; } if (ConditionType->isPointerTy()) { return true; } if (ConditionType->isVectorTy()) { return true; } if (ConditionType->isStructTy()) { return true; } if (ConditionType->isUnionTy()) { return true; }" LLVM,RISCV,420,"void storeRegToStackSlot(RISCV::GPRRegClass regClass, RISCV::GPRRegClass stackSlot, uint32_t regIndex, uint32_t stackIndex) { if (regClass != RISCV::GPRRegClass) { llvm_unreachable(""Can't store this register to stack slot""); } if (stackSlot != RISCV::GPRRegClass) { llvm_unreachable(""Can't store this register to stack slot""); } uint32_t regValue = regIndex; uint32_t stackValue = stackIndex; asm volatile(""sw %0, %1"" : ""=r""(regValue) : ""r""(stackValue)); }" LLVM,RISCV,421, LLVM,RISCV,422,"void select(Instruction *I) { if (I->getOpcode() == Instruction::Generic) { I->setOpcode(Instruction::RISCV); } } ``` This function takes an `Instruction` pointer `I` as an argument, and checks if the instruction is a generic instruction. If it is, it sets the opcode of the instruction to the target-specific opcode for the RISCV backend. You can use this function in your LLVM compiler by calling it on each instruction that you want to select the target-specific opcode for. 
For example: ``` void compile(Module *M) { for (auto &I : M->getInstructions()) { select(I); } }" LLVM,RISCV,423,"void dump(const char *message) { fprintf(stderr, ""RISCV, %s, Base, 0, null\n"", message); }" LLVM,RISCV,424,"const char *getPassName(const Pass *P) { if (P->getTargetMachine()->getTargetTriple().getArch() == llvm::Triple::riscv) { return ""RISCV DAG->DAG Pattern Instruction Selection""; } else { return ""Generic DAG->DAG Pattern Instruction Selection""; } }" LLVM,RISCV,425,"bool isValidOffset(RISCVAddressingMode mode, int64_t offset) { switch (mode) { case RISCVAddressingMode::OffRange: return offset >= 0 && offset < 128; case RISCVAddressingMode::Off12Only: return offset >= 0 && offset < 12; default: return false; } }" LLVM,RISCV,426,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTarget *RISCV = static_cast(TM->getTarget()); const RISCVTargetMachine *RISCVTM = static_cast(RISCV); } LLVM,RISCV,427, LLVM,RISCV,428, LLVM,RISCV,429,"void Select(LLVMContext &Context, ISD::Node *Node) { if (Node->getOpcode() == ISD::Constant) { if (Node->getType() == RISCV::X0) { } else if (Node->getType() == RISCV::FrameIndex) { } } else if (Node->getOpcode() == ISD::FrameIndex) { } }" LLVM,RISCV,430," Value *Addr) { const TargetAddressingMode *TAM = IA->getTargetAddressingMode(Constraint); if (!TAM) { errs() << ""Error: Target addressing mode not supported for constraint '"" << Constraint << ""'\n""; return; } const AddressingMode *AM = IA->getAddressingMode(Addr); if (!AM) { errs() << ""Error: Addressing mode not supported for address '"" << Addr << ""'\n""; return; } IA->selectAddressingMode(TAM, AM); }" LLVM,RISCV,431," Value *Addr) { const TargetAddressingMode *TAM = IA->getTargetAddressingMode(Constraint); if (!TAM) { errs() << ""Error: Target addressing mode not supported for constraint '"" << Constraint << ""'\n""; return; } const AddressingMode *AM = IA->getAddressingMode(Addr); if (!AM) { errs() << ""Error: Addressing mode not supported for address '"" << Addr << ""'\n""; return; } IA->selectAddressingMode(TAM, AM); }" LLVM,RISCV,432,void PostprocessISelDAG(ISelDAG *dag) { } LLVM,RISCV,433,void PostprocessISelDAG(ISelDAG *dag) { } LLVM,RISCV,434, LLVM,RISCV,435," const std::vector &TargetValues) { const std::string &Target = Context.getTargetTriple(); const std::string &Arch = Target.substr(0, Target.find(""-"")); const std::string &Vendor = Target.substr(Target.find(""-"") + 1); const std::string &OS = Target.substr(Target.find(""-"") + 2); if (Arch != ""riscv"") { return; } if (Vendor != ""riscv"") { return; } if (OS != ""riscv"") { return; }" LLVM,RISCV,436,void PostprocessISelDAG(ISelDAG *dag) { } LLVM,RISCV,437,"void PreprocessISelDAG(ISelDAG& DAG) { if (DAG.getTargetTriple().getArch() != Triple::riscv) return; if (DAG.getOpcode() != ISD::SPLAT_VECTOR_SPLIT_I64_VL) return; if (DAG.getNumOperands() != 3) { DAG.emitError(""Unexpected number of operands""); return; } if (DAG.getOperand(0).getType() != MVT::i64 || DAG.getOperand(1).getType() != MVT::i32 || DAG.getOperand(2).getType() != MVT::i32) { DAG.emitError(""Unexpected VTs!""); return; } if (DAG.getOpcode() != ISD::TokenFactor) return; if (DAG.getOperand(0).getType() != MVT::Other || DAG.getOperand(1).getType() != MVT::Other) { DAG.emitError(""Unexpected VTs!""); return; }" LLVM,RISCV,438,"void Select(LLVMContext& Context, ISD::Node* N) { const Target& T = Context.getTarget(); if (T.getArch() != Triple::riscv) { return; } ISD::Node* CurrentNode = N; if 
(CurrentNode->getOpcode() != ISD::Constant) { return; } int64_t ConstantValue = CurrentNode->getConstantValue(); if (ConstantValue != RISCV::X0) { return; } ISD::Node* MachineNode = new ISD::Node(ISD::Constant, ConstantValue); MachineNode->setOpcode(ISD::Constant); MachineNode->setConstantValue(ConstantValue); N = MachineNode; }" LLVM,RISCV,439,"void Select(LLVMContext &Context, ISD::Node *Node) { } ``` This function takes two arguments: * `Context`: an instance of the `LLVMContext` class, which provides access to the LLVM context and its associated data structures. * `Node`: a pointer to an `ISD::Node` object, which represents a node in the LLVM IR. The function body is empty, but it could be implemented to perform target-specific transformations on the node. For example, if the target is RISCV, the function could be implemented as follows: ``` void Select(LLVMContext &Context, ISD::Node *Node) { if (Node->getOpcode() == ISD::Constant) { if (Node->getType() == RISCV::X0) { } else if (Node->getType() == RISCV::FrameIndex) { } } else if (Node->getOpcode() == ISD::FrameIndex) { } else if (Node->getOpcode() == ISD::ADDI) { } }" LLVM,RISCV,440,void PostprocessISelDAG(ISelDAG *dag) { } LLVM,RISCV,441,"void Select(LLVMContext &Context, ISD::Node *Node) { } ``` This function takes two arguments: * `Context`: an instance of the `LLVMContext` class, which provides access to the LLVM context and its associated data structures. * `Node`: a pointer to an `ISD::Node` object, which represents a node in the LLVM IR. The function body is empty, but it could be implemented to perform target-specific transformations on the node. For example, if the target is RISCV, the function could be implemented as follows: ``` void Select(LLVMContext &Context, ISD::Node *Node) { if (Node->getOpcode() == ISD::Constant) { if (Node->getType() == RISCV::X0) { } else if (Node->getType() == RISCV::FrameIndex) { } } else if (Node->getOpcode() == ISD::FrameIndex) { } else if (Node->getOpcode() == ISD::ADDI) { } }" LLVM,RISCV,442,"void PreprocessISelDAG(ISelDAG& DAG) { if (DAG.getTargetTriple().getArch() != Triple::riscv) return; if (DAG.getOpcode() != ISD::SPLAT_VECTOR_SPLIT_I64_VL) return; if (DAG.getNumOperands() != 3) { DAG.emitError(""Unexpected number of operands""); return; } if (DAG.getOperand(0).getType() != MVT::i64 || DAG.getOperand(1).getType() != MVT::i32 || DAG.getOperand(2).getType() != MVT::i32) { DAG.emitError(""Unexpected VTs!""); return; } if (DAG.getOpcode() != ISD::TokenFactor) return; if (DAG.getOperand(0).getType() != MVT::Other || DAG.getOperand(1).getType() != MVT::Other) { DAG.emitError(""Unexpected VTs!""); return; }" LLVM,RISCV,443," const std::vector &TargetValues) { const std::string &Target = Context.getTargetTriple(); const std::string &Arch = Target.substr(0, Target.find(""-"")); const std::string &Vendor = Target.substr(Target.find(""-"") + 1); const std::string &OS = Target.substr(Target.find(""-"") + 2); if (Arch != ""riscv"") { return; } if (Vendor != ""riscv"") { return; } if (OS != ""riscv"") { return; }" LLVM,RISCV,444,"void Select(LLVMContext &Context, ISD::Node *Node) { TargetSpecificValue *RISCVValue = Context.getTargetSpecificValue(RISCV::ADDI64, MVT::i64); TargetSpecificValue *RISCVValue = Context.getTargetSpecificValue(RISCV::ADDI, MVT::i32); if (Node->getOpcode() == ISD::FrameIndex) { int64_t FrameIndex = Node->getFrameIndex(); if (FrameIndex >= 0) { MachineNode *MachineNode = new MachineNode(RISCVValue, FrameIndex, MVT::i64); Context.addMachineNode(MachineNode); } } }" 
LLVM,RISCV,445,"void Select(Module &M, Function &F) { auto &C = M.getContext(); auto *RISCV = M.getTargetMachine().getTarget(); auto *RISCVConstant = ConstantInt::get(C, APInt(32, 0)); auto *RISCVX0 = ConstantInt::get(C, APInt(32, 0)); auto *RISCVFrameIndex = ConstantInt::get(C, APInt(32, 0)); auto *RISCVADDI = ConstantInt::get(C, APInt(32, 0)); auto *MF = new MachineFunction(M, F); auto *BB = new BasicBlock(MF); auto *Inst = new Instruction(MF, RISCV, RISCVConstant, RISCVX0, RISCVFrameIndex, RISCVADDI); BB->addInstruction(Inst); MF->addBasicBlock(BB); MF->setEntryPoint(BB); }" LLVM,RISCV,446,"const char *getPassName(const Pass *P) { if (P->getTargetMachine()->getTargetTriple().getArch() == llvm::Triple::riscv) { return ""RISCV DAG->DAG Pattern Instruction Selection""; } else { return ""Generic DAG->DAG Pattern Instruction Selection""; } }" LLVM,RISCV,447,void PostprocessISelDAG(ISelDAG *dag) { } LLVM,RISCV,448,"void PreprocessISelDAG(ISelDAG& DAG) { if (DAG.getTargetTriple().getArch() != Triple::riscv) return; if (DAG.getOpcode() != ISD::SPLAT_VECTOR_SPLIT_I64_VL) return; if (DAG.getNumOperands() != 3) { DAG.emitError(""Unexpected number of operands""); return; } if (DAG.getOperand(0).getType() != MVT::i64 || DAG.getOperand(1).getType() != MVT::i32 || DAG.getOperand(2).getType() != MVT::i32) { DAG.emitError(""Unexpected VTs!""); return; } if (DAG.getOpcode() != ISD::TokenFactor) return; if (DAG.getOperand(0).getType() != MVT::Other || DAG.getOperand(1).getType() != MVT::Other) { DAG.emitError(""Unexpected VTs!""); return; }" LLVM,RISCV,449,void runOnMachineFunction(MachineFunction &MF) { const TargetMachine *TM = MF.getTargetMachine(); const RISCVTarget *RISCV = static_cast(TM->getTarget()); const RISCVTargetMachine *RISCVTM = static_cast(RISCV); } LLVM,RISCV,450," Value *Addr) { const TargetAddressingMode *TAM = IA->getTargetAddressingMode(Constraint); if (!TAM) { errs() << ""Error: Target addressing mode not supported for constraint '"" << Constraint << ""'\n""; return; } const AddressingMode *AM = IA->getAddressingMode(Addr); if (!AM) { errs() << ""Error: Addressing mode not supported for address '"" << Addr << ""'\n""; return; } IA->selectAddressingMode(TAM, AM); }" LLVM,RISCV,451, LLVM,RISCV,452,Type* getSetCCResultType(Instruction* I) { if (I->getOpcode() != Instruction::SETCC) { return nullptr; } Type* ResultType = I->getOperand(0)->getType(); if (!ResultType->isIntegerTy()) { return nullptr; } switch (ResultType->getIntegerBitWidth()) { case 32: return Type::getInt32Ty(I->getContext()); case 64: return Type::getInt64Ty(I->getContext()); default: return nullptr; } } LLVM,RISCV,453," const Instruction *FMul, const Instruction *FAdd) { if (FMA->getOpcode() != Instruction::FMA) return false; if (FMul->getOpcode() != Instruction::FMUL || FAdd->getOpcode() != Instruction::FADD) return false; if (FMA->getLatency() < FMul->getLatency() + FAdd->getLatency()) return true; return false; }" LLVM,RISCV,454, Register Reg) { LiveInSet &LiveIns = Target.getLiveInSet(); LiveIns.insert(Reg); } LLVM,RISCV,455," unsigned NumOuts, const RISCV::RISCVSubtarget &ST) { for (unsigned i = 0; i < NumOuts; i++) { const ISD::OutputArg &Out = Outs[i]; if (Out.getType()->isVectorTy()) { return false; } if (Out.getType()->isAggregateType()) { return false; } if (Out.getType()->isPointerTy()) { return false; } if (Out.getType()->isFunctionTy()) { return false; } if (Out.getType()->isMetadataTy()) { return false; } } return true; }" LLVM,RISCV,456,"void EmitInstrWithCustomInserter(Instruction *I, const Target *T) { 
if (I->hasCustomInserter()) { const TargetSpecificValue *TSVal = I->getTargetSpecificValue(); if (TSVal->getType() == TargetSpecificValue::RISCV) { const RISCVInstruction *RISCVInstr = TSVal->getRISCVInstruction(); if (RISCVInstr->getOpcode() == RISCV::SELECT_CC || RISCVInstr->getOpcode() == RISCV::SELECT_CC64 || RISCVInstr->getOpcode() == RISCV::FSELECT_CC_F || RISCVInstr->getOpcode() == RISCV::FSELECT_CC_D || RISCVInstr->getOpcode() == RISCV::CALL || RISCVInstr->getOpcode() == RISCV::CALLREG || RISCVInstr->getOpcode() == RISCV::CALL64 || RISCVInstr->getOpcode() == RISCV::CALLREG64) { T->emitRISCVInstructionWithCustomInserter(RISCVInstr, I->getParent()); } else { llvm_unreachable(""Unexpected instruction type to insert""); }" LLVM,RISCV,457, LLVM,RISCV,458, LLVM,RISCV,459,"Value *getExceptionSelectorRegister(Module *M, StringRef RegName) { if (!M->getNamedRegister(RegName)) { report_fatal_error(""Invalid register name: "" + RegName); } unsigned RegNum = M->getNamedRegister(RegName)->getRegNum(); if (RegNum < 16 || RegNum > 31) { report_fatal_error(""Invalid exception selector register: "" + RegName); } return M->getNamedRegister(RegName)->getValue(); }" LLVM,RISCV,460, LLVM,RISCV,461,"int getSingleConstraintMatchWeight(const char *constraint, Type *type) { if (constraint[0] == 'R' && constraint[1] == 'I' && constraint[2] == 'S' && constraint[3] == 'C' && constraint[4] == 'V') { if (type->isRISCV()) { if (constraint[5] == '8' || constraint[5] == '12' || constraint[5] == '16' || constraint[5] == '20' || constraint[5] == '0x7fffffff') { return 10; } } } return 1; }" LLVM,RISCV,462, LLVM,RISCV,463,"const char *getTargetNodeName(const Target *target) { if (target->getTriple().getArch() == llvm::Triple::riscv) { return ""RISCV""; } else { return ""Unknown""; } }" LLVM,RISCV,464,"void Initialize(LLVMContext &Context, RISCVTargetMachine *TM) { Context.setTarget(TM->getTarget()); Context.setTargetTriple(TM->getTargetTriple()); Context.setTargetData(TM->getTargetData()); TM->setTarget(Context.getTarget()); TM->setTargetTriple(Context.getTargetTriple()); TM->setTargetData(Context.getTargetData()); }" LLVM,RISCV,465, const RISCVFPImm *Imm) { if (!Imm->isFPImm()) return false; if (!RI->isFPImmLegal(Imm)) return false; if (!TM->isFPImmLegal(Imm)) return false; if (!ST->isFPImmLegal(Imm)) return false; if (!II->isFPImmLegal(Imm)) return false; return true; } LLVM,RISCV,466,"bool isOffsetFoldingLegal(const GlobalAddress &GA, int64_t Offset) { if (Offset % 4 != 0) return false; if (GA.getOffset() + Offset > GA.getSize()) return false; if (GA.getOffset() % 4 != 0) return false; return true; }" LLVM,RISCV,467, LLVM,RISCV,468,"void LowerOperation(Module &M, IRBuilder<> &Builder, Value *V) { Type *Ty = V->getType(); if (isa(V)) { Constant *C = cast(V); Builder.CreateConstant(C->getType(), C->getValue()); return; } if (isa(V)) { Function *F = cast(V); Builder.CreateCall(F, {}); return; } if (isa(V)) { GlobalVariable *GV = cast(V); Builder.CreateGlobalVariable(GV->getType(), GV->getName(), GV->getLinkage()); return; } if (isa(V)) { BasicBlock *BB = cast(V); Builder.CreateBlock(BB->getType(), BB->getName(), BB->getLinkage()); return; }" LLVM,RISCV,469," unsigned int NumOps, unsigned int OpOffset) { Value *RetVal = Outs[0]; if (!RetVal->getType()->isRegister()) { Context.emitError(""Can only return in registers!""); return; } Type *RetTy = RetVal->getType(); Instruction *RetInst = new ReturnInst(RetVal, RetTy); RetInst->setFlag(Flag); RetInst->setOther(Other); Context.getDAG()->addInstruction(RetInst); }" 
LLVM,RISCV,470,"const char *getTargetNodeName(RISCVISD::NodeType nodeType) { switch (nodeType) { case RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; case RISCVISD::BuildPairF64: return ""RISCVISD::BuildPairF64""; case RISCVISD::SplitF64: return ""RISCVISD::SplitF64""; default: return ""Unknown""; } }" LLVM,RISCV,471,"void LowerOperation(LLVMContext& Context, ISD::Node* Node) { const TargetSpecificValue* TSV = Node->getTargetSpecificValue(); if (TSV->isUnimplemented()) { return; } if (TSV->isGlobalAddress()) { return; } if (TSV->isBlockAddress()) { return; } if (TSV->isConstantPool()) { return; } if (TSV->isSELECT()) { return; } if (TSV->isVASTART()) { return; } if (TSV->isFRAMEADDR()) { return; } if (TSV->isRETURNADDR()) { return; } return; }" LLVM,RISCV,472," unsigned NumOuts, MVT::Type ReturnType) { if (Outs[0].isReg()) { return Outs[0]; } if (ReturnType == MVT::f64) { ISD::OutputArg Lo = Outs[0]; ISD::OutputArg Hi = Outs[1]; ISD::OutputArg Split = RISCVISD::SplitF64(Lo, Hi); return Split; } else { return Outs[0]; } }" LLVM,RISCV,473," RISCVABI::ABI ABI, RISCV::RISCV &RISCV) { for (unsigned i = 0; i < NumOuts; ++i) { const ISD::OutputArg &Out = Outs[i]; if (Out.getType()->isVectorTy()) { return false; } if (Out.getType()->isPointerTy()) { return false; } if (Out.getType()->isIntegerTy() && Out.getType()->getIntegerBitWidth() > 64) { return false; } } if (ABI != RISCVABI::ABI_RV64 && ABI != RISCVABI::ABI_RV32) { return false; } if (RISCV.getTargetTriple().getArch() != Triple::riscv64 && RISCV.getTargetTriple().getArch() != Triple::riscv32) { return false; } return true; }" LLVM,RISCV,474,unsigned ComputeNumSignBitsForTargetNode(const RISCVISD::Node* node) { switch (node->getOpcode()) { case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: return 1; case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: return 1; default: return 0; } } LLVM,RISCV,475," MCInst &NewInst) { if (Inst.getOpcode() == RISCV::Select_GPR_Using_CC_GPR || Inst.getOpcode() == RISCV::Select_FPR32_Using_CC_GPR || Inst.getOpcode() == RISCV::Select_FPR64_Using_CC_GPR || Inst.getOpcode() == RISCV::BuildPairF64Pseudo || Inst.getOpcode() == RISCV::SplitF64Pseudo) { switch (Inst.getOpcode()) { case RISCV::Select_GPR_Using_CC_GPR: break; case RISCV::Select_FPR32_Using_CC_GPR: break; case RISCV::Select_FPR64_Using_CC_GPR: break; case RISCV::BuildPairF64Pseudo: break; case RISCV::SplitF64Pseudo: break; default: llvm_unreachable(""Unexpected instr type to insert""); } } else { NewInst = Inst; } }" LLVM,RISCV,476,ConstraintType getConstraintType(char constraint) { switch (constraint) { case 'R': return ConstraintType::RISCV; case 'I': return ConstraintType::RISCV; case 'S': return ConstraintType::RISCV; case '1': return ConstraintType::RISCV; case '0': return ConstraintType::RISCV; default: return ConstraintType::Unknown; } } LLVM,RISCV,477,"static const char *getExceptionPointerRegister(const Triple &T) { if (T.getArch() == Triple::riscv) { return ""x10""; } return nullptr; }" LLVM,RISCV,478,static Register getExceptionSelectorRegister(const Module *M) { if (M->hasTargetSpecificValue(TargetSpecificValue::ExceptionSelectorRegister)) { return M->getTargetSpecificValue(TargetSpecificValue::ExceptionSelectorRegister); } return RISCV::X11; } LLVM,RISCV,479, LLVM,RISCV,480, LLVM,RISCV,481,"bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { 
case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: return true; default: return false; } } ``` This function takes an `Intrinsic::ID` as input and returns a `bool` indicating whether the intrinsic will need to map to a `MemIntrinsicNode` on the RISCV backend. The function uses a `switch` statement to check the intrinsic ID and returns `true` for the intrinsics that are known to touch memory, and `false` for all other intrinsics. You can use this function in your LLVM compiler by calling it with the intrinsic ID of the instruction you want to check, like this: ``` if (getTgtMemIntrinsic(Intrinsic::riscv_masked_atomicrmw_xchg_i32)) { }" LLVM,RISCV,482, LLVM,RISCV,483," SmallVectorImpl &Ops) { const MachineOperand &Op = MI->getOperand(OpNum); if (Op.isReg()) { unsigned RegNum = Op.getReg(); if (RegNum >= RISCV_REG_X0 && RegNum <= RISCV_REG_X31) { Ops.push_back(MachineOperand::createReg(RegNum)); } else { Ops.push_back(MachineOperand::createReg(RegNum, RISCV_REG_X0)); } } else { Ops.push_back(MachineOperand::createImm(Op.getImm())); } }" LLVM,RISCV,484, LLVM,RISCV,485," RISCVInstruction* Inst) { Type* ResultType = Inst->getType(); if (!ResultType->isLegal()) { Type* LegalType = II.getCustomTypeLegalization(Inst); if (LegalType) { Inst->setType(LegalType); } else { Value* NewValue = nullptr; if (Inst->getOpcode() == RISCVISD::READ_CYCLE_WIDE) { NewValue = TM.getLegalValue(Inst->getOperand(0), LegalType); } else { NewValue = TM.getLegalValue(Inst->getOperand(0), LegalType); } Inst->setOperand(0, NewValue); } } }" LLVM,RISCV,486,const Target *getSubtarget(const TargetMachine &TM) { const Target *Target = TM.getTarget(); if (Target->getTargetID() == Triple::riscv) { return Target; } return nullptr; } LLVM,RISCV,487,"bool shouldNormalizeToSelectSequence(Value *N0, Value *N1, Value *X, Value *Y) { if (N0->getType()->isRISCV()) { if (N0->getType()->isInteger() && N1->getType()->isInteger()) { if (N0->getOpcode() == Instruction::And && N1->getOpcode() == Instruction::And) { if (N0->getOperand(0) == N1 && N0->getOperand(1) == X && N0->getOperand(2) == Y) { return true; } } if (N0->getOpcode() == Instruction::Or && N1->getOpcode() == Instruction::Or) { if (N0->getOperand(0) == N1 && N0->getOperand(1) == X && N0->getOperand(2) == Y) { return true; } } } } return false; }" LLVM,RISCV,488,"bool isFPImmLegal(MVT::Type type, MVT::Type immType) { if (immType.isFloatingPoint()) { if (type.isFloatingPoint()) { if (type.getFloatingPointImmediateInstruction()) { return true; } } } return false; }" LLVM,RISCV,489, LLVM,RISCV,490, LLVM,RISCV,491, LLVM,RISCV,492, LLVM,RISCV,493, LLVM,RISCV,494," RISCVRegisterInfo &RI, RISCVInstruction &Inst) { RISCVValue *Node = Inst.getOperand(0); MVT::Type NodeType = Node->getType(); if (NodeType == MVT::Other) { MVT::Type LegalType = II.getCustomTypeLegalization(Node); if (LegalType != MVT::Other) { RISCVValue *NewNode = II.buildCustomTypeLegalization(Node, LegalType); Inst.setOperand(0, NewNode); } else { std::cerr << ""Unexpected custom legalisation for node "" << Node->getName() << "" with type "" << NodeType << std::endl; } } }" LLVM,RISCV,495," raw_ostream& OS) { if (Inst.getOpcode() == 
RISCV::Select_GPR_Using_CC_GPR) { const MCOperand& CCReg = Inst.getOperand(0); const MCOperand& GPR = Inst.getOperand(1); Printer.printInstruction(Inst, OS); OS << "" // ""; Printer.printOperand(CCReg, OS); OS << "", ""; Printer.printOperand(GPR, OS); OS << ""\n""; } else { Printer.printInstruction(Inst, OS); } }" LLVM,RISCV,496,"static const char *getTargetNodeName(RISCVISD::NodeType nodeType) { switch (nodeType) { case RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; default: return ""Unknown""; } }" LLVM,RISCV,497, LLVM,RISCV,498, LLVM,RISCV,499," RISCVABI::ABI ABI, RISCV::RISCV &RISCV) { for (unsigned i = 0; i < NumOuts; ++i) { const ISD::OutputArg &Out = Outs[i]; if (Out.getType()->isVectorTy()) { return false; } if (Out.getType()->isPointerTy()) { return false; } if (Out.getType()->isIntegerTy() && Out.getType()->getIntegerBitWidth() > 64) { return false; } } if (ABI != RISCVABI::ABI_RV64 && ABI != RISCVABI::ABI_RV32) { return false; } if (RISCV.getTargetTriple().getArch() != Triple::riscv64 && RISCV.getTargetTriple().getArch() != Triple::riscv32) { return false; } return true; }" LLVM,RISCV,500,unsigned getNumRegistersForCallingConv(CallingConv::ID CC) { switch (CC) { case CallingConv::RISCV: return 1; case CallingConv::RISCV_FP: return 2; case CallingConv::RISCV_MVT: return 3; default: return 0; } } LLVM,RISCV,501,"Type* getRegisterTypeForCallingConv(CallingConv::ID CC, Type* Ty) { switch (CC) { case CallingConv::RISCV: case CallingConv::RISCV64: if (Ty->isFloatingPointTy()) { if (Ty->getFloatingPointKind() == MVT::f16) { return Type::getHalfTy(Ty->getContext()); } else if (Ty->getFloatingPointKind() == MVT::f32) { return Type::getFloatTy(Ty->getContext()); } } break; default: break; } return Ty; }" LLVM,RISCV,502, LLVM,RISCV,503,"bool hasAndNot(LLVMContext &Context, const RISCVTargetMachine &TM) { if (TM.getTarget().hasAndNot()) { return true; } return false; }" LLVM,RISCV,504,"bool isFPImmLegal(MVT::Type type, MVT::Value value) { if (!value.isFP()) { return false; } if (type == MVT::f16 && !target.hasFeature(RISCV::Feature::F16)) { return false; } if (type == MVT::f32 && !target.hasFeature(RISCV::Feature::F32)) { return false; } if (type == MVT::f64 && !target.hasFeature(RISCV::Feature::F64)) { return false; } if (type == MVT::f16 && !target.hasFeature(RISCV::Feature::F16_NATIVE)) { return false; } if (type == MVT::f32 && !target.hasFeature(RISCV::Feature::F32_NATIVE)) { return false; } if (type == MVT::f64 && !target.hasFeature(RISCV::Feature::F64_NATIVE)) { return false; } return true; }" LLVM,RISCV,505,bool shouldSinkOperands(Instruction *I) { if (I->getOpcode() != Instruction::RISCV) return false; if (!I->hasOperands()) return false; if (I->getIntrinsicID() != Intrinsic::fma) return false; if (!I->getOperand(0)->getParent() || !I->getOperand(1)->getParent() || I->getOperand(0)->getParent() != I->getOperand(1)->getParent()) return false; if (I->getOperand(0)->getType() != I->getOperand(1)->getType()) return false; if (I->getOperand(0)->getParent() == I->getParent() && I->getOperand(1)->getParent() == I->getParent()) return false; return true; } LLVM,RISCV,506, const RISCVInstruction &Inst) { if (TM.getTargetTriple().getArch() == Triple::riscv) { if (Inst.getOpcode() == RISCV::Load || Inst.getOpcode() == RISCV::Store) { if (Inst.getOperand(0).isReg()) { if (RI.isBaseRegister(Inst.getOperand(0).getReg())) { if 
(Inst.getOperand(0).getReg().getAlignment() != 0) { return true; } } } } } return false; } LLVM,RISCV,507," RISCVABI::ABI ABI, RISCV::RISCV &RISCV) { for (unsigned i = 0; i < NumOuts; ++i) { const ISD::OutputArg &Out = Outs[i]; if (Out.getType()->isVectorTy()) { return false; } if (Out.getType()->isPointerTy()) { return false; } if (Out.getType()->isIntegerTy() && Out.getType()->getIntegerBitWidth() > 64) { return false; } } if (ABI != RISCVABI::ABI_RV64 && ABI != RISCVABI::ABI_RV32) { return false; } if (RISCV.getTargetTriple().getArch() != Triple::riscv64 && RISCV.getTargetTriple().getArch() != Triple::riscv32) { return false; } return true; }" LLVM,RISCV,508, LLVM,RISCV,509,"bool convertSelectOfConstantsToMath(Value* Cond, Value* C1, Value* C2) { if (Cond->getType()->isIntegerTy(1)) { if (C1->getType()->isIntegerTy() && C2->getType()->isIntegerTy()) { if (isPowerOf2(Cond->getUniqueInteger().getZExtValue())) { return true; } } } return false; }" LLVM,RISCV,510,"void convertSetCCLogicToBitwiseLogic(LLVMContext& Context, Instruction* Inst) { const Target& Target = Context.getTarget(); if (Inst->getOpcode() != Instruction::ICmp) { return; } ICmpInst* CmpInst = cast(Inst); Value* LHS = CmpInst->getOperand(0); Value* RHS = CmpInst->getOperand(1); if (!isa(LHS) || !isa(RHS)) { return; } ConstantInt* LHSConst = cast(LHS); ConstantInt* RHSConst = cast(RHS); unsigned Opcode = CmpInst->getOpcode(); unsigned BitwiseOpcode = Target.getBitwiseOpcode(Opcode); Instruction* BitwiseInst = Target.createBitwiseInst(BitwiseOpcode, LHSConst, RHSConst); Inst->replaceAllUsesWith(BitwiseInst); Inst->eraseFromParent(); }" LLVM,RISCV,511,bool decomposeMulByConstant(int constant) { if (constant & (constant - 1)) { return false; } if (constant % 2 == 0) { return true; } if (constant % 4 == 0) { return true; } if (constant % 8 == 0) { return true; } if (constant % 12 == 0) { return true; } return false; } LLVM,RISCV,512,"void EmitInstrWithCustomInserter(LLVMContext &Context, Instruction *Instr) { if (Instr->hasCustomInserter()) { int TargetSpecificValue = Instr->getCustomInserterValue(); switch (TargetSpecificValue) { case RISCV::ReadCycleWide: break; case RISCV::Select_GPR_Using_CC_GPR: break; case RISCV::Select_FPR16_Using_CC_GPR: break; case RISCV::Select_FPR32_Using_CC_GPR: break; case RISCV::Select_FPR64_Using_CC_GPR: break; case RISCV::BuildPairF64Pseudo: break; case RISCV::SplitF64Pseudo: break; default: llvm_unreachable(""Unexpected instr type to insert""); } } }" LLVM,RISCV,513,"void emitLeadingFence(LLVMContext &Context, const RISCVSubtarget &Subtarget) { const MachineInstr *Instr = Subtarget.getInstr(); const RISCVTargetValue *TargetValue = Instr->getTargetValue(); const RISCVInstruction *FenceInstr = RISCVInstruction::create( Context, Instr->getOpcode(), Instr->getOperands(), TargetValue); FenceInstr->setIsLeadingFence(true); Instr->addInstruction(FenceInstr); } ``` This function takes an `LLVMContext` and a `RISCVSubtarget` as input, and uses them to create a new `RISCVInstruction` that represents the leading fence instruction. It then sets the `IsLeadingFence` flag on the instruction to indicate that it is a leading fence, and adds the instruction to the current instruction pointer. You can then use this function in your RISCV backend of LLVM Compiler by calling it whenever you need to emit a leading fence instruction. 
For example: ``` void emitLeadingFence(LLVMContext &Context, const RISCVSubtarget &Subtarget) { } void emitInstruction(LLVMContext &Context, const RISCVSubtarget &Subtarget, const MachineInstr *Instr) { if (Instr->getOpcode() == RISCVInstruction::Fence) { emitLeadingFence(Context, Subtarget); } }" LLVM,RISCV,514," Intrinsic::riscv_masked_cmpxchg_i64) { Function *Intrinsic = Intrinsic::getDeclaration(M, ID); BasicBlock *BB = BasicBlock::Create(Context, ""intrinsic"", M); Instruction *Instr = new Instruction(Intrinsic, BB); Instr->setOperand(0, Intrinsic::riscv_masked_cmpxchg_i32); Instr->setOperand(1, Intrinsic::riscv_masked_cmpxchg_i64); BB->addInstruction(Instr); return BB; }" LLVM,RISCV,515," AtomicOrdering Ordering) { Function* Intrinsic = Intrinsic::getDeclaration(&Context, ""llvm.riscv.atomicrmw"", {Type::getInt64Ty(Context), Type::getInt64Ty(Context), Type::getInt64Ty(Context)}); Value* MaskedPtr = Builder.CreateBitCast(Ptr, Type::getInt64Ty(Context)); Value* MaskedValue = Builder.CreateBitCast(Value, Type::getInt64Ty(Context)); Value* MaskedMask = Builder.CreateBitCast(Mask, Type::getInt64Ty(Context)); Value* MaskedAtomicRMW = Builder.CreateCall(Intrinsic, {MaskedPtr, MaskedValue, MaskedMask}, ""masked_atomicrmw""); Value* Result = Builder.CreateBitCast(MaskedAtomicRMW, Ptr->getType()); return Result; }" LLVM,RISCV,516, LLVM,RISCV,517,"enum ConstraintType { CT_REGISTER, CT_MEMORY, CT_IMMEDIATE, CT_ADDRESS, CT_INVALID }; ConstraintType getConstraintType(char constraint) { switch (constraint) { case 'r': return CT_REGISTER; case 'm': return CT_MEMORY; case 'i': return CT_IMMEDIATE; case 'a': return CT_ADDRESS; default: return CT_INVALID; } }" LLVM,RISCV,518,"static const char *getExceptionPointerRegister(const Triple &T) { if (T.getArch() == Triple::riscv) { return ""x10""; } return nullptr; }" LLVM,RISCV,519,static Register getExceptionSelectorRegister(const Module *M) { if (M->hasTargetSpecificValue(TargetSpecificValue::ExceptionSelectorRegister)) { return M->getTargetSpecificValue(TargetSpecificValue::ExceptionSelectorRegister); } return RISCV::X11; } LLVM,RISCV,520,"static const char *getExtendForAtomicCmpSwapArg(const AtomicCmpSwapInst *Inst) { if (Inst->getComparisonValue()->getType()->isIntegerTy()) { if (Inst->getComparisonValue()->getType()->getIntegerBitWidth() == 32) { return ""ZERO_EXTEND""; } else if (Inst->getComparisonValue()->getType()->getIntegerBitWidth() == 64) { return ""SIGN_EXTEND""; } } else if (Inst->getComparisonValue()->getType()->isAnyExtend()) { return ""ANY_EXTEND""; } return """"; }" LLVM,RISCV,521,"ISD::NodeType getExtendForAtomicOps(ISD::NodeType nodeType) { switch (nodeType) { case ISD::ZERO_EXTEND: return ISD::ZERO_EXTEND; case ISD::SIGN_EXTEND: return ISD::SIGN_EXTEND; case ISD::ANY_EXTEND: return ISD::ANY_EXTEND; default: llvm_unreachable(""Invalid node type for atomic operation""); } }" LLVM,RISCV,522,unsigned getNumRegistersForCallingConv(CallingConv::ID CC) { switch (CC) { case CallingConv::RISCV: return 1; case CallingConv::RISCV_FP: return 2; case CallingConv::RISCV_MVT: return 3; default: return 0; } } LLVM,RISCV,523,"unsigned getPrefTypeAlign(LLVMContext &Context, Type *Ty) { const DataLayout &DL = Context.getTargetDataLayout(); unsigned Align = DL.getPrefTypeAlignment(Ty); if (Ty->isVectorTy()) { Align = DL.getPrefTypeAlignment(Ty->getVectorElementType()); } return Align; }" LLVM,RISCV,524, LLVM,RISCV,525,"Type* getRegisterTypeForCallingConv(CallingConv::ID CC, Type* Ty) { switch (CC) { case CallingConv::RISCV: case CallingConv::RISCV64: if 
(Ty->isFloatingPointTy()) { if (Ty->getFloatingPointKind() == MVT::f16) { return Type::getHalfTy(Ty->getContext()); } else if (Ty->getFloatingPointKind() == MVT::f32) { return Type::getFloatTy(Ty->getContext()); } } break; default: break; } return Ty; }" LLVM,RISCV,526, LLVM,RISCV,527, LLVM,RISCV,528,bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: case Intrinsic::riscv_masked_strided_load: case Intrinsic::riscv_masked_strided_store: return true; default: return false; } } LLVM,RISCV,529,"Type* getVPExplicitVectorLengthTy(Instruction* I) { if (I->getOpcode() == Instruction::VP_ADD || I->getOpcode() == Instruction::VP_SUB || I->getOpcode() == Instruction::VP_MUL || I->getOpcode() == Instruction::VP_DIV || I->getOpcode() == Instruction::VP_REM || I->getOpcode() == Instruction::VP_SHL || I->getOpcode() == Instruction::VP_SHR || I->getOpcode() == Instruction::VP_AND || I->getOpcode() == Instruction::VP_OR || I->getOpcode() == Instruction::VP_XOR || I->getOpcode() == Instruction::VP_NOT) { Type* vectorType = I->getOperand(0)->getType(); if (vectorType->isRISCVVectorTy()) { unsigned numElements = vectorType->getVectorNumElements(); Type* explicitVectorLengthType = Type::getIntNTy(I->getContext(), numElements); return explicitVectorLengthType; } }" LLVM,RISCV,530, LLVM,RISCV,531,"bool isCheapToSpeculateCttz(LLVMContext &Context, const IntrinsicInst *II) { if (II->getIntrinsicID() != Intrinsic::cttz) return false; if (II->getNumOperands() == 1 && II->getOperand(0)->getType()->isIntegerTy()) { if (isa(II->getOperand(0))) { if (II->getOperand(0)->getUniqueInteger().isPowerOf2()) { return true; } } } return false; }" LLVM,RISCV,532, LLVM,RISCV,533,bool isEligibleForTailCallOptimization(CallInst *CI) { if (!CI->isTailCall()) { return false; } if (CI->getCalledFunction()->getIntrinsicID() != Intrinsic::riscv_interrupt) { return false; } if (!CI->getCalledFunction()->hasAttribute(Attribute::Interrupt)) { return false; } if (!CI->getCalledFunction()->hasAttribute(Attribute::RISCVInterrupt)) { return false; } if (!CI->getCalledFunction()->hasAttribute(Attribute::RISCVInterrupt) || !CI->getCalledFunction()->hasAttribute(Attribute::Interrupt)) { return false; } if (!CI->getCalledFunction()->hasAttribute(Attribute::RISCVInterrupt) || !CI->getCalledFunction()->hasAttribute(Attribute::Interrupt) || CI->getCalledFunction()->getReturnType() != Type::getVoidTy(CI->getContext())) { return false; } LLVM,RISCV,534,"bool isFMAFasterThanFMulAndFAdd(MVT::f16, MVT::f32, MVT::f64) { if (!hasFMA()) { return false; } if (hasFMAFasterThanFMulAndFAdd()) { return true; } if (hasFMulAndFAddFasterThanFMA()) { return false; } return hasFMA() && hasFMAFasterThanFMulAndFAdd(); }" LLVM,RISCV,535,"bool isFPImmLegal(MVT::Type type, MVT::Value value) { if (!value.isFP()) { return false; } if (type == MVT::f16 && !target.hasFeature(RISCV::Feature::F16)) { return false; } if (type == MVT::f32 && !target.hasFeature(RISCV::Feature::F32)) { return false; } if (type == MVT::f64 && !target.hasFeature(RISCV::Feature::F64)) { return false; } if (type == MVT::f16 && !target.hasFeature(RISCV::Feature::F16_NATIVE)) 
{ return false; } if (type == MVT::f32 && !target.hasFeature(RISCV::Feature::F32_NATIVE)) { return false; } if (type == MVT::f64 && !target.hasFeature(RISCV::Feature::F64_NATIVE)) { return false; } return true; }" LLVM,RISCV,536, LLVM,RISCV,537,"bool isShuffleMaskLegal(uint64_t mask) { if (mask == 0 || mask == 1 || mask == 2 || mask == 3 || mask == 4 || mask == 5 || mask == 6 || mask == 7) { return true; } return false; } ``` This function takes a single argument `mask`, which is a 64-bit unsigned integer representing the shuffle mask. The function checks if the mask is a valid shuffle mask for the RISCV backend by comparing it to a set of known valid masks. If the mask is valid, the function returns `true`, otherwise it returns `false`. You can use this function in your RISCV backend to check if a given shuffle mask is legal before attempting to use it in a vector shuffle operation. For example: ``` if (isShuffleMaskLegal(mask)) { } else { }" LLVM,RISCV,538, LLVM,RISCV,539, const Target &Target) { } LLVM,RISCV,540,"bool mayBeEmittedAsTailCall(CallInst *CI, const Target &RISCV) { if (!CI->isTailCall()) { return false; } if (!RISCV.hasTailCallSupport()) { return false; } if (!CI->isDirectCall()) { return false; } Function *F = CI->getCalledFunction(); if (!F->isLeaf()) { return false; } if (!F->hasReturn()) { return false; } if (F->hasReturn() && !F->getReturnInst()->isTailCall()) { return false; } if (F->hasReturn() && !F->getReturnInst()->isDirectCall()) { return false; } if (F->hasReturn() && !F->getReturnInst()->isLeaf()) { return false; } if (F->hasReturn() && !F->getReturnInst()->isTailCall()) { return false; } if (F->hasReturn() && !F->getReturnInst()->isDirectCall()) { return false; }" LLVM,RISCV,541,"void mergeStoresAfterLegalization(LLVMContext &Context, const MVT::i1 &TargetSpecificValue) { if (TargetSpecificValue == MVT::i1::RISCV) { return; } }" LLVM,RISCV,542," RISCVABI::ABI_LP64 ABI_LP64, MVT::f32 F32) { if (Ty->isFloatingPointTy()) { if (ABI == RISCVABI::ABI_LP64) { if (F32) { return true; } } } return false; }" LLVM,RISCV,543,"bool shouldSignExtendTypeInLibCall(LLVMContext &Context, const MVT::Type *Type) { if (!Type->isIntegerTy()) return false; if (!Type->isSigned()) return false; if (Type->getIntegerBitWidth() != 32) return false; if (Context.getTargetTriple().getArch() != Triple::riscv) return false; if (Context.getTargetTriple().getArchSize() != 32) return false; if (Context.getTargetTriple().getEndianness() != Triple::LittleEndian) return false; return true; }" LLVM,RISCV,544,bool shouldSinkOperands(Instruction *I) { if (I->getOpcode() != Instruction::RISCV) return false; if (!I->hasOperands()) return false; if (I->getIntrinsicID() != Intrinsic::fma) return false; if (!I->getOperand(0)->getParent() || !I->getOperand(1)->getParent() || I->getOperand(0)->getParent() != I->getOperand(1)->getParent()) return false; if (I->getOperand(0)->getType() != I->getOperand(1)->getType()) return false; if (I->getOperand(0)->getParent() == I->getParent() && I->getOperand(1)->getParent() == I->getParent()) return false; return true; } LLVM,RISCV,545,"void splitValueIntoRegisterParts(Value *V, unsigned int NumParts) { if (NumParts == 0) { report_fatal_error(""The number of element should not be zero.""); } Type *Ty = V->getType(); if (Ty->isVectorTy()) { unsigned int NumElements = Ty->getVectorNumElements(); if (NumParts > NumElements) { report_fatal_error(""The number of parts should not be greater than the number of elements.""); } for (unsigned int i = 0; i < NumParts; i++) { Type 
*PartTy = Ty->getVectorElementType(i); Value *Part = new Value(PartTy); Part->setValue(V->getAggregateElement(i)); Parts.push_back(Part); } } else { report_fatal_error(""The type is not a vector type.""); } }" LLVM,RISCV,546,"void AdjustInstrPostInstrSelection(MachineInstr *MI) { if (MI->hasPostISelHook()) { int OpIdx = RISCV::getNamedOperandIdx(MI, RISCV::OpName::FRM); if (OpIdx != -1 && MI->getOperand(OpIdx).isFPRoundingMode()) { RISCVFPRndMode RndMode = MI->getOperand(OpIdx).getFPRoundingMode(); if (RndMode == RISCVFPRndMode::DYN) { MI->setOperand(OpIdx, RISCV::FRM::RNE); } } } }" LLVM,RISCV,547," const SDIVInst *Inst, Value *Dividend, Value *Divisor) { if (!isPowerOf2(Divisor)) { Builder.CreateCall(Inst->getCalledFunction(), {Dividend, Divisor}); return; } int ShiftAmount = log2(Divisor); Value *ShiftedDividend = Builder.CreateShl(Dividend, ShiftAmount); Value *Quotient = Builder.CreateSDiv(ShiftedDividend, Divisor); Value *ShiftedQuotient = Builder.CreateLShr(Quotient, ShiftAmount); Builder.CreateRet(ShiftedQuotient); }" LLVM,RISCV,548, LLVM,RISCV,549, const TargetInstrInfo &TII) { if (!Instr->hasCustomInserter()) return; uint64_t TargetSpecificValue = TII.getTargetSpecificValue(Instr); if (Instr->getOpcode() == RISCV::ReadCycleWide) { if (Context.getTargetMachine().getTargetTriple().isRISCV32()) { Instr->setCustomInserter(TII.getCustomInserter(Instr)); } else { Instr->setCustomInserter(nullptr); } LLVM,RISCV,550, LLVM,RISCV,551,bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: return true; case Intrinsic::riscv_masked_strided_load: case Intrinsic::riscv_masked_strided_store: return false; default: return false; } } LLVM,RISCV,552,"bool hasAndNotCompare(Value *X, Value *Y) { if (getTarget() != RISCV) { return false; } if (X->getType() != Y->getType()) { return false; } if (X->getType() != Y->getType()) { return false; } if (X->getType() != Y->getType()) { return false; } if (X->getType() != Y->getType()) { return false; } return true; }" LLVM,RISCV,553,"bool isFPImmLegal(MVT::Type type, MVT::Value value) { if (!value.isFP()) { return false; } if (type == MVT::f16 && !target.hasFeature(RISCV::Feature::F16)) { return false; } if (type == MVT::f32 && !target.hasFeature(RISCV::Feature::F32)) { return false; } if (type == MVT::f64 && !target.hasFeature(RISCV::Feature::F64)) { return false; } if (type == MVT::f16 && !target.hasFeature(RISCV::Feature::F16_NATIVE)) { return false; } if (type == MVT::f32 && !target.hasFeature(RISCV::Feature::F32_NATIVE)) { return false; } if (type == MVT::f64 && !target.hasFeature(RISCV::Feature::F64_NATIVE)) { return false; } return true; }" LLVM,RISCV,554,"bool isZExtFree(Value *Val, Type *VT2) { if (Val->getType()->isIntegerTy() && Val->getType()->getIntegerBitWidth() == VT2->getIntegerBitWidth()) return true; if (Val->getType()->isPointerTy() && Val->getType()->getPointerElementType()->isIntegerTy()) { if (Val->getType()->getPointerElementType()->getIntegerBitWidth() == VT2->getIntegerBitWidth()) return true; } if (Val->getType()->isConstant()) { if (Val->getType()->getConstantValue()->getIntegerBitWidth() == 
VT2->getIntegerBitWidth()) return true; } return false; }" LLVM,RISCV,555, LLVM,RISCV,556,"bool shouldConvertFpToSat(Type *FPVT, Type *VT) { if (FPVT->getTarget() != RISCV) return false; if (!FPVT->isFloatingPointTy()) return false; if (!VT->isIntegerTy() || !VT->isSigned()) return false; if (VT->getPrimitiveSizeInBits() != 16 && VT->getPrimitiveSizeInBits() != 32 && VT->getPrimitiveSizeInBits() != 64) return false; if (FPVT->getPrimitiveSizeInBits() != 16 && FPVT->getPrimitiveSizeInBits() != 32 && FPVT->getPrimitiveSizeInBits() != 64) return false; if (!FPVT->isSupportedConversion(VT)) return false; if (!FPVT->isSaturatingConversion(VT)) return false; return true; }" LLVM,RISCV,557,"void splitValueIntoRegisterParts(Value *V, unsigned int NumParts) { if (!V->getType()->isLegalRISCVType()) { return; } for (unsigned int i = 0; i < NumParts; i++) { Value *Part = new Value(V->getType()); Part->setValue(V->getValue() >> (i * 16)); Parts.push_back(Part); } }" LLVM,RISCV,558, LLVM,RISCV,559,bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: return true; default: return false; } } LLVM,RISCV,560,"bool shouldExpandAtomicRMWInIR(AtomicRMWInst *Inst) { if (Inst->getOpcode() != AtomicRMWInst::RISCV) return false; if (Inst->getAtomicRMWOpcode() != AtomicRMWInst::RISCV_Expand) return false; if (Inst->getAtomicRMWTargetSpecificValue() != RISCV,RISCV,8,16) return false; return true; }" LLVM,RISCV,561, LLVM,RISCV,562,void PerformDAGCombine(DAGNode *Node) { int TargetSpecificValue = Node->getTargetSpecificValue(); if (TargetSpecificValue == RISCVISD::SplitF64 || TargetSpecificValue == RISCVISD::BuildPairF64 || TargetSpecificValue == RISCVISD::SLLW || TargetSpecificValue == RISCVISD::SRAW || TargetSpecificValue == RISCVISD::SRLW) { DAGNode *PrevNode = Node->getPrevNode(); if (PrevNode) { int PrevTargetSpecificValue = PrevNode->getTargetSpecificValue(); if (PrevTargetSpecificValue == RISCVISD::SplitF64 || PrevTargetSpecificValue == RISCVISD::BuildPairF64 || PrevTargetSpecificValue == RISCVISD::SLLW || PrevTargetSpecificValue == RISCVISD::SRAW || PrevTargetSpecificValue == RISCVISD::SRLW) { Node->combineWith(PrevNode); } } } } LLVM,RISCV,563,"void ReplaceNodeResults(LLVMContext &Context, ISD::Node *Node) { MVT::Type ResultType = Node->getResultType(); if (!ResultType.isLegal()) { ISD::Node *NewNode = nullptr; switch (Node->getOpcode()) { case ISD::SHL: case ISD::SRA: case ISD::SRL: NewNode = ISD::Constant(Context, ResultType, Node->getOperand(0)); break; case ISD::Constant: NewNode = ISD::Constant(Context, ResultType, Node->getConstantValue()); break; case ISD::SDIV: case ISD::UDIV: case ISD::UREM: NewNode = ISD::Constant(Context, ResultType, Node->getOperand(0), Node->getOperand(1)); break; default: NewNode = ISD::Constant(Context, ResultType, 0); break; } Node->replaceAllUsesWith(NewNode); } }" LLVM,RISCV,564," const Value* NewValue) { const Intrinsic::Declaration* Decl = Intrinsic::getDeclaration(Context, IntrinsicID); CallInst* Call = CallInst::Create(Decl, Pointer, Mask, CmpValue, NewValue); Call->setReturnType(Intrinsic::riscv_masked_cmpxchg_i32); BasicBlock::iterator InsertPoint = Builder.GetInsertPoint(); 
Builder.SetInsertPoint(InsertPoint); Builder.Insert(Call); }" LLVM,RISCV,565,"void PerformDAGCombine(DAGNode *Node) { if (Node->isTargetIndependent()) { return; } if (Node->isTargetSpecific()) { if (Node->getTarget() == RISCV) { if (Node->getOpcode() == RISCVISD::SplitF64) { DAGNode *LowNode = Node->getOperand(0); DAGNode *HighNode = Node->getOperand(1); LowNode->setOpcode(RISCVISD::BuildPairF64); HighNode->setOpcode(RISCVISD::BuildPairF64); LowNode->setOperand(0, Node->getOperand(0)); HighNode->setOperand(0, Node->getOperand(1)); LowNode->setOperand(1, Node->getOperand(2)); HighNode->setOperand(1, Node->getOperand(3)); Node->replaceAllUsesWith(LowNode); Node->replaceAllUsesWith(HighNode); return; } } } return; }" LLVM,RISCV,566,"void EmitInstrWithCustomInserter(LLVMContext &Context, Instruction *Instr) { if (Instr->hasCustomInserter()) { auto RISCV = Instr->getTargetSpecificValue(); auto RISCV_GPR = RISCV.getGPR(); auto RISCV_FPR32 = RISCV.getFPR32(); auto RISCV_CC = RISCV.getCC(); switch (Instr->getOpcode()) { case Instruction::Select_GPR_Using_CC_GPR: Instr->setCustomInserter(RISCV_GPR); break; case Instruction::Select_FPR32_Using_CC_GPR: Instr->setCustomInserter(RISCV_FPR32); break; case Instruction::PHI: Instr->setCustomInserter(RISCV_CC); break; default: llvm_unreachable(""Unexpected instr type to insert""); } } }" LLVM,RISCV,567, const std::vector& RetVals) { if (Outs.size() != 1 || !Outs[0]->getType()->isRegister()) { return; } Value* RetVal = Outs[0]; if (RetVal->getType()->isConstant()) { return RetVal; } if (RetVal->getType()->isRegister()) { return RetVal; } if (RetVal->getType()->isMemory()) { return RetVal; } return RetVal; } LLVM,RISCV,568,"bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: return true; default: return false; } } ``` This function takes an `Intrinsic::ID` as input and returns a `bool` indicating whether the intrinsic will need to map to a `MemIntrinsicNode` on the RISCV backend. The function uses a `switch` statement to check the intrinsic ID and returns `true` for the intrinsics that are known to touch memory, and `false` for all other intrinsics. 
You can use this function in your LLVM compiler by calling it with the intrinsic ID of the instruction you want to check, like this: ``` if (getTgtMemIntrinsic(Intrinsic::riscv_masked_atomicrmw_xchg_i32)) { }" LLVM,RISCV,569, const RISCVInstruction &Inst) { if (TM.getTargetTriple().getArch() == Triple::riscv) { if (Inst.getOpcode() == RISCV::Load || Inst.getOpcode() == RISCV::Store) { if (Inst.getOperand(0).isReg()) { if (RI.isBaseRegister(Inst.getOperand(0).getReg())) { if (Inst.getOperand(0).getReg().getAlignment() != 0) { return true; } } } } } return false; } LLVM,RISCV,570,"Value* getAllOnesMask(unsigned NumElts) { Value* mask = ConstantVector::getSplat(NumElts, ConstantInt::get(Type::getInt1Ty(Context), 1)); Value* result = Intrinsic::getDeclaration(Context, Intrinsic::RISCVISD::VMSET_VL, mask->getType()); Value* args[] = {mask, ConstantInt::get(Type::getInt32Ty(Context), NumElts)}; return CallInst::Create(result, args, """", nullptr, nullptr); }" LLVM,RISCV,571," const Type* Ty) { if (!T.isLegalAddressingMode(AM)) return false; if (!T.isLegalType(Ty)) return false; if (!T.isLegalAddressingModeForType(AM, Ty)) return false; return true; }" LLVM,RISCV,572,"bool isOffsetFoldingLegal(const GlobalAddress &GA, int64_t Offset) { if (Offset % 4 != 0) return false; if (GA.getOffset() + Offset > GA.getSize()) return false; if (GA.getOffset() % 4 != 0) return false; return true; }" LLVM,RISCV,573,"bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(Value *X, Value *Y, Constant *C) { if (X->getType()->isIntegerTy() && Y->getType()->isIntegerTy() && C->getType()->isIntegerTy() && X->getType()->getIntegerBitWidth() == Y->getType()->getIntegerBitWidth() && X->getType()->getIntegerBitWidth() == C->getType()->getIntegerBitWidth()) { if (isa(Y)) { if (Y->getSExtValue() >= 0 && Y->getSExtValue() < X->getType()->getIntegerBitWidth()) { if (C->getUniqueInteger().isPowerOf2()) { if (C->getUniqueInteger().ule(X->getType()->getIntegerBitWidth())) { return true; } } } } } return false; }" LLVM,RISCV,574,"bool signExtendConstant(const Constant *C, const DataLayout &DL) { if (C->getType()->isIntegerTy() && C->getType()->isSigned()) { if (isPowerOf2(C->getUniqueInteger().getZExtValue())) { return true; } } return false; } ``` This function takes a `Constant` pointer `C` and a `DataLayout` object `DL` as input. It first checks if the constant is a signed integer type. If it is, it checks if the constant is a power of 2. If it is, it returns `true` to indicate that the constant should be sign extended. Otherwise, it returns `false`. The `isPowerOf2` function is a helper function that checks if a given integer is a power of 2. 
It is defined as follows: ``` bool isPowerOf2(uint64_t x) { return (x != 0) && ((x & (x - 1)) == 0); }" LLVM,RISCV,575, const RISCVInstruction &Inst) { if (TM.getTargetTriple().getArch() == Triple::riscv) { if (Inst.getOpcode() == RISCV::Load || Inst.getOpcode() == RISCV::Store) { if (Inst.getOperand(0).isReg()) { if (RI.isBaseRegister(Inst.getOperand(0).getReg())) { if (Inst.getOperand(0).getReg().getAlignment() != 0) { return true; } } } } } return false; } LLVM,RISCV,576,unsigned ComputeNumSignBitsForTargetNode(const TargetNode *Node) { unsigned NumSignBits = 0; if (Node->getOpcode() == RISCVISD::SLLW || Node->getOpcode() == RISCVISD::SRAW || Node->getOpcode() == RISCVISD::SRLW) { NumSignBits = 1; } else if (Node->getOpcode() == RISCVISD::DIVW || Node->getOpcode() == RISCVISD::DIVUW || Node->getOpcode() == RISCVISD::REMUW) { NumSignBits = 1; } LLVM,RISCV,577,bool decomposeMulByConstant(uint64_t constant) { if (constant & (constant - 1)) { return false; } if (constant <= 16) { return true; } if (constant >= 128) { return true; } if (constant >= 64 && constant <= 128) { return true; } return false; } LLVM,RISCV,578,"void EmitInstrWithCustomInserter(LLVMContext &Context, Instruction *Instr) { if (Instr->hasCustomInserter()) { int TargetSpecificValue = Instr->getCustomInserterValue(); switch (TargetSpecificValue) { case RISCV::ReadCycleWide: break; case RISCV::Select_GPR_Using_CC_GPR: break; case RISCV::Select_FPR16_Using_CC_GPR: break; case RISCV::Select_FPR32_Using_CC_GPR: break; case RISCV::Select_FPR64_Using_CC_GPR: break; case RISCV::BuildPairF64Pseudo: break; case RISCV::SplitF64Pseudo: break; default: llvm_unreachable(""Unexpected instr type to insert""); } } }" LLVM,RISCV,579,ConstraintType getConstraintType(char constraint) { switch (constraint) { case 'R': return ConstraintType::RISCV; case 'I': return ConstraintType::RISCV; case 'S': return ConstraintType::RISCV; case '1': return ConstraintType::RISCV; case '0': return ConstraintType::RISCV; default: return ConstraintType::Unknown; } } LLVM,RISCV,580, LLVM,RISCV,581,unsigned ComputeNumSignBitsForTargetNode(const RISCVISD::Node* node) { switch (node->getOpcode()) { case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: case RISCVISD::ROLW: case RISCVISD::RORW: case RISCVISD::GREVW: case RISCVISD::GORCW: case RISCVISD::FSLW: case RISCVISD::FSRW: case RISCVISD::SHFLW: case RISCVISD::UNSHFLW: case RISCVISD::BCOMPRESSW: case RISCVISD::BDECOMPRESSW: case RISCVISD::FCVT_W_RV64: case RISCVISD::FCVT_WU_RV64: return 33; case RISCVISD::SHFL: case RISCVISD::UNSHFL: return 64; case RISCVISD::VMV_X_S: return 1; default: return 0; } } LLVM,RISCV,582,ConstraintType getConstraintType(char constraint) { switch (constraint) { case 'R': return ConstraintType::RISCV; case 'I': return ConstraintType::RISCV; case 'S': return ConstraintType::RISCV; case '1': return ConstraintType::RISCV; case '0': return ConstraintType::RISCV; default: return ConstraintType::Unknown; } } LLVM,RISCV,583," MCInstPrinter* Printer, raw_ostream& OS) { const Target* TheTarget = &Context.getTarget(); const RISCVTargetMachine* RISCVTM = static_cast(TheTarget); const RISCVInstrInfo* RISCVII = RISCVTM->getInstrInfo(); const RISCVSubtarget* RISCVST = RISCVTM->getSubtarget(); if (Inst.getOpcode() == RISCV::Select_GPR_Using_CC_GPR || Inst.getOpcode() == RISCV::Select_FPR32_Using_CC_GPR || Inst.getOpcode() == RISCV::Select_FPR64_Using_CC_GPR || Inst.getOpcode() == RISCV::BuildPairF64Pseudo || Inst.getOpcode() == 
RISCV::SplitF64Pseudo) { RISCVII->EmitInstrWithCustomInserter(Inst, MCII, STI, Printer, OS); } else { RISCVII->EmitInstr(Inst, MCII, STI, Printer, OS); } }" LLVM,RISCV,584, LLVM,RISCV,585, LLVM,RISCV,586," RISCVSubtarget& Subtarget, RISCVInstruction& Inst) { Type* ResultType = Inst.getType(); if (!ResultType->isLegal()) { } }" LLVM,RISCV,587," RISCVABI::ABI ABI, RISCV::RISCV &RISCV) { for (unsigned i = 0; i < NumOuts; ++i) { const ISD::OutputArg &Out = Outs[i]; if (Out.getType()->isVectorTy()) { return false; } if (Out.getType()->isPointerTy()) { return false; } if (Out.getType()->isIntegerTy() && Out.getType()->getIntegerBitWidth() > 64) { return false; } } if (ABI != RISCVABI::ABI_RV64 && ABI != RISCVABI::ABI_RV32) { return false; } if (RISCV.getTargetTriple().getArch() != Triple::riscv64 && RISCV.getTargetTriple().getArch() != Triple::riscv32) { return false; } return true; }" LLVM,RISCV,588, LLVM,RISCV,589," const TargetTransformInfo &TTI) { if (V->getType()->isIntegerTy() && C.isConstant()) { if (C.isPowerOf2()) { Value *Shift = Builder.CreateShl(V, C.logBase2()); Value *Add = Builder.CreateAdd(Shift, V); return true; } } return false; }" LLVM,RISCV,590," int SEWIndex, int RMIndex) { if (!Instr->hasCustomInserter()) { return; } RISCVVPseudosTable::PseudoInfo *CustomInserter = PseudoInfo->getPseudoInfo(Instr->getOpcode()); if (!CustomInserter) { return; }" LLVM,RISCV,591,"std::string getTargetNodeName(const RISCVISD::NodeType &nodeType) { switch (nodeType) { case RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCVISD::SECOND_NUMBER: return ""RISCVISD::SECOND_NUMBER""; case RISCVISD::THIRD_NUMBER: return ""RISCVISD::THIRD_NUMBER""; default: return ""Unknown RISCVISD::NodeType""; } }" LLVM,RISCV,592,Value* getAddr(Instruction* I) { if (I->getOpcode() != Instruction::Load && I->getOpcode() != Instruction::Store) { return nullptr; } Value* addr = I->getOperand(0); if (isa(addr)) { return addr; } if (isa(addr)) { return addr; } if (isa(addr)) { return addr; } if (isa(addr->getType())) { return addr; } if (isa(addr->getType())) { return addr; } if (isa(addr)) { return addr; } if (isa(addr)) { return addr; } if (isa(addr->getType())) { return addr; } LLVM,RISCV,593,bool getTgtMemIntrinsic(Intrinsic::ID id) { switch (id) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: case Intrinsic::riscv_masked_strided_load: case Intrinsic::riscv_masked_strided_store: case Intrinsic::riscv_seg2_load: case Intrinsic::riscv_seg3_load: case Intrinsic::riscv_seg4_load: case Intrinsic::riscv_seg5_load: case Intrinsic::riscv_seg6_load: case Intrinsic::riscv_seg7_load: case Intrinsic::riscv_seg8_load: return true; default: return false; } } LLVM,RISCV,594,"bool hasBitTest(LLVMContext &Context, const RISCVSubtarget &Subtarget) { if (Subtarget.hasBitTest()) { return true; } if (Subtarget.hasBitTest(10)) { return true; } return false; }" LLVM,RISCV,595, LLVM,RISCV,596,bool isShuffleMaskLegal(uint64_t mask) { if (mask & 0xFFFFFFFF00000000) { return false; } if (mask == 0x0000000000000000 || mask == 0x0000000000000001 || mask == 0x0000000000000002 || mask == 0x0000000000000003 || mask == 0x0000000000000004 || mask == 
0x0000000000000005 || mask == 0x0000000000000006 || mask == 0x0000000000000007 || mask == 0x0000000000000008 || mask == 0x0000000000000009 || mask == 0x000000000000000A || mask == 0x000000000000000B || mask == 0x000000000000000C || mask == 0x000000000000000D || mask == 0x000000000000000E || mask == 0x000000000000000F) { return true; } return false; } LLVM,RISCV,597, LLVM,RISCV,598, LLVM,RISCV,599, LLVM,RISCV,600, LLVM,RISCV,601, const RISCVIntrinsic& Intrinsic) { Intrinsic::ID IntrinsicID = Intrinsic::riscv_masked_cmpxchg_i32; if (IntrinsicInfo.getType() == RISCVIntrinsicInfo::Type::I64) { IntrinsicID = Intrinsic::riscv_masked_cmpxchg_i64; } LLVM,RISCV,602,"void emitMaskedAtomicRMWIntrinsic(LLVMContext& Context, Instruction* Inst) { Function* Intrinsic = Intrinsic::getDeclaration(Context, Intrinsic::riscv_atomicrmw); Value* Mask = Inst->getOperand(0); Value* Value = Inst->getOperand(1); BasicBlock* IntrinsicBB = BasicBlock::Create(Context, ""intrinsic"", Inst->getParent()); Instruction* IntrinsicInst = Intrinsic::Create(Intrinsic, {Mask, Value}, IntrinsicBB); IntrinsicBB->getTerminator()->setSuccessor(0, IntrinsicInst); Inst->getParent()->getInstList().push_back(IntrinsicInst); }" LLVM,RISCV,603,"uint64_t getAddr(const Recipe *recipe) { if (recipe->getOpcode() != RISCV::LUI && recipe->getOpcode() != RISCV::ADDI) { return 0; } uint64_t addr = recipe->getOperand(0).getImm(); if (addr >= RISCV::PseudoCLLC && addr <= RISCV::PseudoCLGC) { return addr; } if (RISCV::isCheriPureCapABI(addr)) { return addr; } if (RISCV::isCheriPureCapABI(addr, RISCV::PseudoLLA)) { return addr; } if (RISCV::isCheriPureCapABI(addr, RISCV::PseudoLA)) { return addr; } if (RISCV::isCheriPureCapABI(addr, RISCV::PseudoLLC)) { return addr; }" LLVM,RISCV,604,ConstraintType getConstraintType(char constraint) { switch (constraint) { case 'R': return ConstraintType::RISCV; case 'I': return ConstraintType::RISCV; case 'S': return ConstraintType::RISCV; case '1': return ConstraintType::RISCV; case '0': return ConstraintType::RISCV; default: return ConstraintType::Unknown; } } LLVM,RISCV,605, LLVM,RISCV,606," unsigned SrcAlign, unsigned CapSize) { if (VT.isVoid()) { return EVT::Other; } if (Size == 8) { return EVT::i8; } else if (Size == 16) { return EVT::i16; } else if (Size == 32) { return EVT::i32; } else if (Size == 64) { return EVT::i64; } else { return EVT::Other; } }" LLVM,RISCV,607,"Type* getSetCCResultType(Instruction* I) { if (I->getOpcode() != Instruction::SETCC) { return nullptr; } Type* resultType = I->getOperand(0)->getType(); if (resultType->isVectorTy()) { resultType = resultType->getVectorElementType(); } if (resultType->isPointerTy()) { resultType = resultType->getPointerElementType(); } if (resultType->isScalarTy()) { return resultType; } if (resultType->isVectorTy() || resultType->isPointerTy()) { return Type::getIntNTy(I->getContext(), resultType->getScalarSizeInBits()); } return nullptr; }" LLVM,RISCV,608, LLVM,RISCV,609, LLVM,RISCV,610, LLVM,RISCV,611,"bool shouldExtendTypeInLibCall(Type *Ty, RISCVABI::ABI ABI) { if (Ty->isFloatingPointTy()) { if (ABI == RISCVABI::ABI_LP64) { return true; } else if (ABI == RISCVABI::ABI_L64PC128) { return true; } } else if (Ty->isVectorTy()) { if (ABI == RISCVABI::ABI_LP64) { return true; } else if (ABI == RISCVABI::ABI_L64PC128) { return true; } } else if (Ty->isPointerTy()) { if (ABI == RISCVABI::ABI_LP64) { return true; }" LLVM,RISCV,612, LLVM,RISCV,613,"namespace RISCV { void LowerOperation(Instruction *I, const DataLayout &DL) { Value *Op0 = I->getOperand(0); Value 
*Op1 = I->getOperand(1); if (I->getOpcode() == Instruction::Custom) { if (I->getCustomOpcode() == RISCVISD::FMV_W_X_RV64) { I->setOpcode(Instruction::FMV_W_X); I->setOperand(0, Op0); I->setOperand(1, Op1); } else { llvm_unreachable(""Unexpected custom legalization""); } } else { if (I->getOpcode() == Instruction::BitCast) { I->setOpcode(Instruction::BITCAST); I->setOperand(0, Op0); I->setOperand(1, Op1); } else { llvm_unreachable(""Unexpected instruction""); }" LLVM,RISCV,614,ConstraintType getConstraintType(char constraint) { switch (constraint) { case 'R': return ConstraintType::RISCV; case 'I': return ConstraintType::RISCV; case 'S': return ConstraintType::RISCV; case '1': return ConstraintType::RISCV; case '0': return ConstraintType::RISCV; default: return ConstraintType::Unknown; } } LLVM,RISCV,615," DAG& DAG) { CallingConv::ID CC = Ins->getCallingConv(); Type* ArgTypes[Ins->getNumArgs()]; for (unsigned i = 0; i < Ins->getNumArgs(); ++i) { ArgTypes[i] = Ins->getArg(i)->getType(); } for (unsigned i = 0; i < Ins->getNumArgs(); ++i) { Type* ArgType = ArgTypes[i]; CallingConv::ID ArgCC = Ins->getArg(i)->getCallingConv(); DAG.addArgument(ArgType, ArgCC); } }" LLVM,RISCV,616,"void LowerOperation(Instruction *I, IRBuilder<> &Builder) { Value *TargetValue = I->getOperand(0); if (TargetValue->getType()->isRISCV()) { unsigned Opcode = I->getOpcode(); if (Opcode == ISD::SELECT) { Builder.CreateRISCVSelect(I->getOperand(1), I->getOperand(2), I->getOperand(3)); return; } if (Opcode == ISD::GLOBAL_ADDRESS) { Builder.CreateRISCVGlobalAddress(I->getOperand(1), I->getOperand(2)); return; } if (Opcode == ISD::BLOCK_ADDRESS) { Builder.CreateRISCVBlockAddress(I->getOperand(1), I->getOperand(2)); return; }" LLVM,RISCV,617, const TargetMachine &TM) { if (!Outs->getType()->isRegister()) { assert(Outs->getType()->isMemory()); Outs->setType(Target.getRegister(Outs->getType())); } if (Outs->getType()->isFloatingPoint()) { assert(Outs->getType()->isRegister()); Outs->setType(Target.getFloatingPointRegister(Outs->getType())); } if (Outs->getType()->isVector()) { assert(Outs->getType()->isRegister()); Outs->setType(Target.getVectorRegister(Outs->getType())); } if (Outs->getType()->isPointer()) { assert(Outs->getType()->isRegister()); Outs->setType(Target.getRegister(Outs->getType())); } LLVM,RISCV,618, RISCV::TargetLowering &Target) { for (unsigned i = 0; i < NumOuts; ++i) { const ISD::OutputArg &Out = Outs[i]; if (Out.getType().isVectorTy() && Out.getType().getVectorElementType().isIntegerTy()) { if (Out.getType().getVectorNumElements() > Target.getMaxReturnRegisters()) { return false; } } else if (Out.getType().isIntegerTy()) { if (Out.getType().getIntegerBitWidth() > Target.getMaxReturnRegisters() * Target.getRegisterBitWidth()) { return false; } } } return true; } LLVM,RISCV,619," ISD::OutputArg* Outs, ISD::TargetSpecificValue* TSV) { ISD::CallingConvention CC = Ins->getCallingConvention(); ISD::ArgumentType AT = Ins->getArgumentType(); if (CC != ISD::CC_RISCV) { return; } if (AT != ISD::AT_RISCV) { return; } for (unsigned i = 0; i < Ins->getNumArguments(); i++) { ISD::Argument* Arg = Ins->getArgument(i); ISD::Value* ArgValue = Arg->getValue(); if (ArgValue->getType()->isRegister()) { ISD::Register* Reg = ArgValue->getRegister(); ISD::Value* RegValue = Reg->getValue(); if (RegValue->isConstant()) { ISD::Constant* Const = RegValue->getConstant(); ISD::Value* ConstValue = Const->getValue(); Outs->addValue(ConstValue); } else { ISD::Value* RegValue = Reg->getValue(); Outs->addValue(RegValue); }" LLVM,RISCV,620, 
LLVM,RISCV,621,Value *getAddr(Instruction *I) { if (I->getOpcode() != Instruction::Load && I->getOpcode() != Instruction::Store) { return nullptr; } RISCVII::AddressingMode addrMode = I->getAddressingMode(); if (addrMode == RISCVII::AddressingMode::Unsupported) { return nullptr; } Value *addr = I->getOperand(0); if (isa(addr)) { return addr; } if (isa(addr)) { return addr; } if (isa(addr)) { return addr; } if (isa(addr)) { return addr; } if (isa