GCC,nvptx,0,"void tool_cleanup (const char *target_specific_value) { if (strcmp(target_specific_value, ""nvptx"") != 0) { return; } system(""rm temp_file1 temp_file2 temp_file3""); }" GCC,nvptx,1,"void maybe_unlink(const char *filename) { if (filename == NULL) { return; } #ifdef __NVPTX__ fprintf(stderr, ""deleting file %s: %m\n"", filename); remove(filename); fprintf(stderr, ""[Leaving %s]\n"", filename); #else unlink(filename); #endif }" GCC,nvptx,2,"void parse_list_nosemi(const char *list, int* target_specific_value) { int i = 0; char* ptr; char* target_specific_values[3]; ptr = strtok(list, "",""); while (ptr != NULL) { target_specific_values[i] = ptr; i++; ptr = strtok(NULL, "",""); } for (i = 0; i < 3; i++) { target_specific_value[i] = atoi(target_specific_values[i]); } }" GCC,nvptx,3,"char* read_file(void) { FILE* fp = fopen(FILE_NAME, ""r""); fseek(fp, 0, SEEK_END); int size = ftell(fp); char* content = malloc(size + 1); fseek(fp, 0, SEEK_SET); fread(content, size, 1, fp); fclose(fp); content[size] = '\0'; return content; }" GCC,nvptx,4,"void tool_cleanup(const char *target) { char temp_file[PATH_MAX]; snprintf(temp_file, sizeof(temp_file), ""*.nvptx""); glob_t temp_paths; glob(temp_file, GLOB_TILDE, NULL, &temp_paths); for(int i = 0; i < temp_paths.gl_pathc; ++i) { remove(temp_paths.gl_pathv[i]); } globfree(&temp_paths); char temp_dir[PATH_MAX]; snprintf(temp_dir, sizeof(temp_dir), ""%s/*.XXX"", target); glob_t temp_dirs; glob(temp_dir, GLOB_TILDE, NULL, &temp_dirs); for(int i = 0; i < temp_dirs.gl_pathc; ++i) { remove(temp_dirs.gl_pathv[i]); }" GCC,nvptx,5, GCC,nvptx,6,"int access_check(const char *name, int mode) { if(target_specific_value_is_not_nvptx_0_1) { return -1; } if(mode == S_IXOTH) { return 0; } else { int access_result = access(name, mode); if(access_result == 0) { return 1; } else { return 0; } } }" GCC,nvptx,7,"__global__ void free_ptrs_kernel(void** ptr, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N && ptr[idx]) free(ptr[idx]); } void free_array_of_ptrs(x, N) { if (!x) return; #pragma omp target enter data map(to: x[0:N]) map(alloc: x[0:N]) device(nvptx,0) #pragma omp target teams distribute parallel for device(nvptx) is_device_ptr(x) for (int i = 0; i < N; ++i) free_ptrs_kernel(x, N); #pragma omp target exit data map(release: x[0:N]) }" GCC,nvptx,8,"void maybe_unlink (const char *filename) { if (filename == NULL) return; if (access (filename, F_OK) != -1) { if (unlink (filename) != 0) { fprintf (stderr, ""deleting file %s: %m"", filename); } } fprintf (stderr, ""[Leaving %s]\n"", filename); }" GCC,nvptx,9,"int parse_env_var(char *str, int *pvalues) { int count = 0; char *token; const char delimeter = ':'; for (token = strtok(str, &delimeter); token != NULL; token = strtok(NULL, &delimeter)) { pvalues[count] = atoi(token); count++; } return count; } int main(void) { char *str = ""nvptx,1,1,0,1,0,1""; int pvalues[7]; int num_tokens = parse_env_var(str, pvalues); for (int i = 0; i < num_tokens; i++) { printf(""%d\n"", pvalues[i]); } }" GCC,nvptx,10,"char* read_file(const char* filename) { FILE* file = fopen(filename, ""r""); if (file == NULL) { return NULL; } fseek(file, 0, SEEK_END); long size = ftell(file) + 1; rewind(file); char* file_contents = malloc(size); if (file_contents == NULL) { fclose(file); return NULL; } fread(file_contents, 1, size - 1, file); file_contents[size - 1] = '\0'; fclose(file); return file_contents; }" GCC,nvptx,11,"void tool_cleanup(const char *target) { if (strcmp(target, ""nvptx"") != 0) { printf(""Target is not 
nvptx, skipping cleanup.\n""); return; } system(""rm -rf *.tmp""); system(""rm -rf inter*.o""); system(""rmdir temp/""); system(""rm -rf *.o""); system(""rm -rf *.debug""); system(""rm -rf *.i""); system(""rm -rf *.d""); system(""sudo purge""); printf(""Cleanup completed for nvptx backend.\n""); }" GCC,nvptx,12,"void xputenv(char* envvar, char* value) { int verbose = get_vmode_option_for_nvptx(); char buffer[256]; if (value != NULL) { snprintf(buffer, 256, ""%s=%s"", envvar, value); } else { snprintf(buffer, 256, ""%s="", envvar); } __nv_set_var(envvar, value); if (verbose) { fprintf(stderr, ""Environment variable changed: %s\n"", buffer); } }" GCC,nvptx,13,"void nvptx_cpu_cpp_builtins() { __builtin_cpp_define(""nvptx""); __builtin_cpp_define(""machine=nvptx""); __builtin_cpp_define(""cpu=nvptx""); __builtin_cpp_define(""__nvptx__""); __builtin_cpp_define(""__nvptx_softstack__""); __builtin_cpp_define(""__nvptx_unisimt__""); __builtin_cpp_define(""__PTX_SM__="", ""0""); __builtin_cpp_define(""__PTX_ISA_VERSION_MAJOR__=%u"", 0); __builtin_cpp_define(""__PTX_ISA_VERSION_MINOR__=%u"", 0); }" GCC,nvptx,14, GCC,nvptx,15,"nvptx_expand_builtin (tree exp, tree target, enum machine_mode mode, rtx subtarget, int ignore) { if (ignore != 0) { } else { rtx result; result = expand_expr (exp, target, mode, EXPAND_NORMAL); if (result == 0 && subtarget != 0) { result = expand_expr (exp, subtarget, mode, EXPAND_NORMAL); } if (result == 0) { error (""unable to expand expression""); return; } if (target != 0) { emit_move_insn (target, result); } } return; }" GCC,nvptx,16,"void nvptx_goacc_reduction_fini(void *priv_data) { double *temp_var1, *temp_var2, reduction_var; priv_data = &temp_var1; reduction_var = temp_var1[0] + temp_var1[1] + temp_var1[2] + temp_var1[3]; asm(""st.global.f64 [%0], %1 ;"" : :""l""(reduction_var), ""r""(global_addr(gpu_reduction_var))); return; }" GCC,nvptx,17,"void nvptx_init_builtins(void) { __builtin_nvptx_ = ""__builtin_nvptx_""; shuffle = ""__builtin_nvptx_shuffle""; shufflell = ""__builtin_nvptx_shufflell""; worker_addr = ""__builtin_nvptx_worker_addr""; vector_addr = ""__builtin_nvptx_vector_addr""; cmp_swap = ""__builtin_nvptx_cmp_swap""; cmp_swapll = ""__builtin_nvptx_cmp_swapll""; membar_gl = ""__builtin_nvptx_membar_gl""; membar_cta = ""__builtin_nvptx_membar_cta""; bar_red_and = ""__builtin_nvptx_bar_red_and""; bar_red_or = ""__builtin_nvptx_bar_red_or""; bar_red_popc = ""__builtin_nvptx_bar_red_popc""; }" GCC,nvptx,18,"void nvptx_option_override(void) { TARGET_OPTION_OVERRIDE(NVPTX_OVERRIDE_PATCH_AREA, 1); TARGET_OPTION_OVERRIDE(NVPTX_OVERRIDE_NOP, 0); TARGET_OPTION_OVERRIDE(NVPTX_OVERRIDE_NOP_NOT_SUPPORTED, 1); TARGET_OPTION_OVERRIDE_VALUE(NVPTX_PATCH_AREA_VALUE, ""not generating patch area""); TARGET_OPTION_OVERRIDE_VALUE(NVPTX_NOP_NOT_SUPPORTED_VALUE, ""nops not supported""); TARGET_OPTION_OVERRIDE(NVPTX_OVERRIDE_OMP, 0); TARGET_OPTION_OVERRIDE(NVPTX_WORKER_RED, 0); TARGET_OPTION_OVERRIDE(NVPTX_VECTOR_RED, 0); TARGET_OPTION_OVERRIDE(NVPTX_GANG_PRIVATE_SHARED, 17); TARGET_OPTION_OVERRIDE(NVPTX_GANG_PRIVATE_SHARED_VALUE, ""__gang_private_shared""); TARGET_OPTION_OVERRIDE(NVPTX_OMP, 17); TARGET_OPTION_OVERRIDE(NVPTX_OMP_VALUE, ""__oacc_bcast""); TARGET_OPTION_OVERRIDE(NVPTX_WORKER_RED_VALUE, ""__worker_red""); TARGET_OPTION_OVERRIDE(NVPTX_VECTOR_RED_VALUE, ""__vector_red""); TARGET_OPTION_OVERRIDE(NVPTX_MGOMP, 0); TARGET_OPTION_OVERRIDE(NVPTX_MGOMP_VALUE, ""-mgomp""); TARGET_OPTION_OVERRIDE(NVPTX_SOFT_STACK, 0); TARGET_OPTION_OVERRIDE(NVPTX_SOFT_STACK_VALUE, 
""-msoft-stack""); TARGET_OPTION_OVERRIDE(NVPTX_UNIFORM_SIMT, 0); TARGET_OPTION_OVERRIDE(NVPTX_UNIFORM_SIMT_VALUE, ""-muniform-simt""); }" GCC,nvptx,19,"void write_fn_proto (FILE *s, tree decl, const char *name) { fprintf(s, ""// This function is for use by ld only. Do not modify.\n\n""); if (DECL_FUNCTION_DEFINITION_P(decl)) { fprintf(s, "".func %s() %s {\n"", name, TARGET_NVPTX_VERSION? ""nvptx,0"": """"); } else { fprintf(s, "".kernel %s() %s;\n"", name, TARGET_NVPTX_VERSION? ""nvptx,0"": """"); } }" GCC,nvptx,20,"void write_fn_proto_1() { char* nvptx_alias = ""alias""; char* nvptx_extern = "".extern ""; char* nvptx_weak = "".weak ""; char* nvptx_visible = "".visible ""; char* nvptx_entry = "".entry ""; char* nvptx_func = "".func ""; int nvptx_first_num = 1; int nvptx_second_num = 3; int nvptx_third_num = 0; int nvptx_fourth_num = 1; int nvptx_fifth_num = 1; int nvptx_sixth_num = 1; int nvptx_seventh_num = 1; int nvptx_eighth_num = 2; char* nvptx_func_name = ""main""; int nvptx_ninth_num = 0; int nvptx_tenth_num = 0; int nvptx_eleventh_num = 1; int nvptx_twelfth_num = 1; int nvptx_thirteenth_num = 1; printf(""%s%s%s%s%s%s%d,%d,%d,%d,%d,%d,%d,%d,%s,%d,%d,%d,%d,%d%c%s%c"",""Helper function for write_fn_proto"",nvptx_alias,nvptx_extern,nvptx_weak,nvptx_visible,nvptx_entry,nvptx_func,nvptx_first_num,nvptx_second_num,nvptx_third_num,nvptx_fourth_num,nvptx_fifth_num,nvptx_sixth_num,nvptx_seventh_num,nvptx_eighth_num,nvptx_func_name,nvptx_ninth_num,nvptx_tenth_num,nvptx_eleventh_num,nvptx_twelfth_num,nvptx_thirteenth_num,')',""\n"","";\n""); }" GCC,nvptx,21,void arg_promotion(enum mode mode){ if (mode == NVPTX_MODE) { return NVPTX_PROMOTED_MODE; } } GCC,nvptx,22,"void begin_decl_field() { printf(""nvptx, = { ""); }" GCC,nvptx,23,"void init_output_initializer(const char *name, FILE *file, int is_public) { fprintf(file, ""// BEGIN%s VAR DEF: "", name); if (is_public) { fprintf(file, ""GLOBAL""); } else { fprintf(file, ""STATIC""); } fprintf(file, ""\"",\""\"",0,0,0\n""); }" GCC,nvptx,24,int maybe_split_mode(PMODE pm) { if (pm == nvptx && pm->tsv[0] == 2 && pm->tsv[1] == 2 && pm->tsv[2] == 1) { return 2; } else { return 1; } } GCC,nvptx,25,"int nvptx_addr_space_from_address(tree ADDR, tree SYMBOL_REF) { if (TREE_CODE(ADDR) == SYMBOL_REF && STRNCMP(TREE_STRING_POINTER(ADDR), ""nvptx, 0"", sizeof(""nvptx, 0"")-1) == 0) { if (DECL_P(SYMBOL_REF)) { return 0; } else if (VAR_DECL_P(SYMBOL_REF)) { tree addr_space = DECL_ADDR_SPACE(SYMBOL_REF); return TREE_INT_CST_LOW(addr_space); }" GCC,nvptx,26," const char *initializer) { fprintf(file, ""\t.const .align %lu .u%lu %s %s %s %s %s\n"", size, 1, ""\"""", name, ""\"""", ""["", ""]""); fprintf(file, ""%s = %s\n"", name, initializer); }" GCC,nvptx,27,"void nvptx_assemble_decl_end(FILE *file, tree decl, tree init_expr) { fprintf(file, ""nvptx,0,0,0,\"" }\"",\"";\\n\""""); if (init_expr != NULL) { assemble_expr(file, init_expr); } fflush(file); }" GCC,nvptx,28,"nvptx_assemble_integer(void *int_obj, size_t size) { const char *assembly_code; if ((uintptr_t)int_obj % size != 0) { assembly_code = ""nvptx,0,\""cannot emit unaligned pointers in ptx assembly\"",0,0,1,0,\""generic(\"",\"")\"",0,\"" + \""""; } else { switch (size) { case 1: assembly_code = ""nvptx,0,\""assembly code for 1 byte integer\"",0,0,1,0,\""generic(\"",\"")\"",0,\"" + \""""; break; case 2: assembly_code = ""nvptx,0,\""assembly code for 2 byte integer\"",0,0,1,0,\""generic(\"",\"")\"",0,\"" + \""""; break; case 4: assembly_code = ""nvptx,0,\""assembly code for 4 byte 
integer\"",0,0,1,0,\""generic(\"",\"")\"",0,\"" + \""""; break; case 8: assembly_code = ""nvptx,0,\""assembly code for 8 byte integer\"",0,0,1,0,\""generic(\"",\"")\"",0,\"" + \""""; break; default: assembly_code = """"; break; } } return assembly_code; }" GCC,nvptx,29," const char *file; { fprintf(file, ""// BEGIN%s VAR DECL: "", nvptx, name); fputs(""GLOBAL"", file); fputs("""", file); fputs(""\n"", file); fprintf(file, "".extern %s .b8 "", name); fprintf(file, ""[0]\n\n""); }" GCC,nvptx,30, GCC,nvptx,31,"nvptx_call_args (tree arg, int argnum, int nargs, void *data) { struct nvptx_target_data *nvptx_data = (struct nvptx_target_data *) data; if (!nvptx_data) return arg; if (argnum == 0) nvptx_data->is_kernel_call = true; nvptx_data->arg_types[argnum] = TREE_TYPE(arg); return arg; }" GCC,nvptx,32,"void nvptx_declare_object_name(FILE *file, const char *decl, const char *name, int alignment, int size) { const char *target = ""\t%s%s .align %d .u%d [%s]""; fprintf(file, target, "" .visible"", """", alignment, size, name); }" GCC,nvptx,33,"nvptx_end_call_args (void *x ATTRIBUTE_UNUSED, tree exp) { if (TARGET_NVPTX) { tree decl = get_callee_fndecl (exp); if (decl == NULL_TREE) return; VEC(tree, heap) *args = DECL_ARGUMENTS (decl); FOR_EACH_VEC_ELT (tree, args, i, arg) { clear_expr_location (arg); clear_tree_all (arg); ggc_free (arg); } VEC_free (tree, heap, args); } }" GCC,nvptx,34,"static unsigned int get_target_specific_value(void) { return 0x201; } rtx nvptx_expand_compare (rtx lhs, rtx rhs, enum rtx_code code) { unsigned int target_value = get_target_specific_value(); rtx new_compare = gen_rtx_COMPARE (VOIDmode, lhs, rhs); XEXP (new_compare, 0) = GEN_INT (target_value); XEXP (new_compare, 1) = gen_rtx_CONST_INT (VOIDmode, 1); XEXP (new_compare, 2) = gen_rtx_CONST_INT (VOIDmode, code); return new_compare; }" GCC,nvptx,35,"void nvptx_file_end (void) { struct function_decl *decls = __builtin_nvptx_func_decls(); while (decls != NULL) { write_function_declaration(decls, target_file); decls = decls->next; } }" GCC,nvptx,36,"void nvptx_file_start (FILE *file, int addr_size) { fprintf (file, ""// BEGIN PREAMBLE\n""); fprintf (file, ""\t.version\t3.1\n""); fprintf (file, ""\t.target\tsm_30\n""); fprintf (file, ""\t.address_size %d\n"", addr_size); fprintf (file, ""// END PREAMBLE\n""); }" GCC,nvptx,37,"nvptx_function_arg (function, arg, mode) { if (mode == BYVAL) { target_arg_value = TARGET_ARG_VALUE (function, arg); } else if (mode == BYPOINTER) { target_arg_value = TARGET_ARG_POINTER (function, arg); } if (TARGET_SCALAR_VALUE_P (target_arg_value)) { NVPTX_INVALIDATE_SURPLUS_REGS (function, target_arg_value); } else if (TARGET_STRUCT_VALUE_P (target_arg_value)) { expanded_args = NVPTX_EXPAND_ARGS (target_arg_value); foreach (arg in expanded_args) { NVPTX_INVALIDATE_SURPLUS_REGS (function, arg); } } return target_arg_value; }" GCC,nvptx,38,"nvptx_function_arg_advance(void **x_arg_ptr, int *x_arg_regno_ptr, tree type, int named) { *x_arg_regno_ptr += 2; }" GCC,nvptx,39,void nvptx_function_arg_boundary (void) { int boundary[7]; boundary[0] = 0; boundary[1] = 2; boundary[2] = 4; boundary[3] = 2; boundary[4] = 3; boundary[5] = 2; boundary[6] = 2; TARGET_FUNCTION_ARG_BOUNDARY = boundary; } GCC,nvptx,40,"void nvptx_function_end(FILE *file) { fputs(""\tnvptx, \""\t}\n\"""", file); }" GCC,nvptx,41," int regno, HOST_WIDE_INT incoming_regno) { if (mode == BLKmode) { } }" GCC,nvptx,42,"nvptx_function_value (FUNCTION_DECL func, tree type) { tree func_type = DECL_RESULT_TYPE (func); tree decl = NULL_TREE; if 
(TYPE_MAIN_VARIANT (TREE_TYPE (func_type)) == type) { decl = TREE_VALUE (func_type); } else { tree list; for (list = TREE_PURPOSE (func_type); list; list = TREE_CHAIN (list)) { if (TREE_VALUE (list) == type) { decl = TREE_PURPOSE (list); break; } } } if (!decl) return NULL_RTX; return gen_rtl (DECL_RTL (decl)); }" GCC,nvptx,43,"bool nvptx_handle_kernel_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) { if (!DECL_FUNCTION_P(node)) { error(""`%qE' attribute only applies to functions"", name); return false; } if (!void_type_p(TREE_TYPE(node))) { error(""`%qE' attribute requires a void return type"", name); return false; } return true; }" GCC,nvptx,44, GCC,nvptx,45,machine_function nvptx_init_machine_status() { machine_function nvptx_machine_function = (machine_function) malloc(sizeof(machine_function)); nvptx_machine_function->nvptx = 0; nvptx_machine_function->call_site_data = NULL; nvptx_machine_function->jump_table = NULL; return nvptx_machine_function; } GCC,nvptx,46,"int nvptx_libcall_value(char *name, int retval) { if (name == ""nvptx"") { return retval; } return 0; }" GCC,nvptx,47, GCC,nvptx,48, GCC,nvptx,49,"void nvptx_output_return(rtx insn, rtx retval) { char buf[256]; sprintf(buf, ""\tst.param%s\t[%%out_retval], %%retval;\n"", GET_MODE_NAME(DI_MODE)); output_asm_insn(buf, NULL_RTX, GEN_INT(0)); sprintf(buf, ""\tmov.%s\t%%retval, [%%out_retval];\n"", GET_MODE_NAME(DI_MODE)); output_asm_insn(buf, NULL_RTX, retval); }" GCC,nvptx,50,"void nvptx_output_skip(void) { asm_out_file (ASM_OUTPUT_CONS, ""nvptx"", 0, 0, 0, 0, 0, 1); }" GCC,nvptx,51,"void nvptx_pass_by_reference(void* data, int size, int mode) { if (mode != MODE1 && mode != MODE2 && mode != MODE3) { } else { } }" GCC,nvptx,52,"void nvptx_print_address_operand(FILE* f, rtx x, int code) { char* mode; switch (code) { case nvptx,0,1,0,""+"",0: break; default: return; } switch (GET_CODE(x)) { case REG: mode = GET_MODE_NAME(GET_MODE(x)); fprintf(f, ""%s"", mode); break; case CONST_INT: if (CONST_INT_NEGATIVE(x)) { fprintf(f, ""-""); }" GCC,nvptx,53,"void nvptx_print_operand_address(FILE *file, rtx addr) { if (REG_P(addr)) { fprintf(file, ""%%""); const char* name = REG_NAME(addr); if (reg_renumber[REGNO(addr)] >= FIRST_PSEUDO_REGISTER) { fprintf(file, ""rd_%s"", name); } else { fprintf(file, ""%s"", name); } } else { rtx base = XEXP(addr, 0); rtx disp = XEXP(addr, 1); if (! 
MEM_P(base)) { fprintf(stderr, ""nvptx_print_operand_address: Missing base in operand\n""); return; } if (REG_P(base)) { fprintf(file, ""%%""); const char* name = REG_NAME(base); if (reg_renumber[REGNO(base)] >= FIRST_PSEUDO_REGISTER) { fprintf(file, ""rd_%s"", name); } else { fprintf(file, ""%s"", name); } } else { fprintf(file, ""$""); if (CONST_INT_P(disp)) { fprintf(file, ""%d"", INTVAL(disp)); } else { fprintf(stderr, ""nvptx_print_operand_address: Base address has invalid displacement type\n""); return; } } if (CONST_INT_P(disp)) { fprintf(file, ""+%d"", INTVAL(disp)); } " GCC,nvptx,54,void nvptx_promote_function_mode(mode){ if (get_mode() == mode){ return; } if (!valid_mode(mode)){ return; } set_mode(mode); } GCC,nvptx,55, GCC,nvptx,56,"void nvptx_record_fndecl(void* decl) { if (TREE_CODE(decl) == FUNCTION_DECL && DECL_EXTERNAL(decl)) { fndecl_hash_table_add(decl); printf(""prototype for nvptx backend: %s;\n"", DECL_NAME(decl)); } }" GCC,nvptx,57,"nvptx_record_needed_fndecl (tree decl, int nvptx) { if (DECL_ARGUMENTS (decl)) { emit_ptx_decl (decl, nvptx); } else { record_decl_for_later (decl, nvptx); } }" GCC,nvptx,58,"int nvptx_record_offload_symbol(char* symbol_name, char* symbol_type){ FILE *mapping_table; mapping_table = fopen (""nvptx_mapping_table"", ""a""); if (!mapping_table){ printf(""Error opening mapping table file\n""); return -1; } fprintf(mapping_table, ""//:%s_MAP %s\n"", symbol_type, symbol_name); fclose(mapping_table); return 0; }" GCC,nvptx,59,"target_return_in_memory nvptx_return_in_memory(return_type, nvptx_specific_value, return_value_ptr) { target_return_in_memory in_memory = nvptx_specific_value; if (return_type is a struct or union type) { in_memory = 1; } else { in_memory = 0; } if (return_value_ptr is not null) { *return_value_ptr = in_memory; } return in_memory; }" GCC,nvptx,60,"char* nvptx_section_for_decl(decl_node* decl) { if(decl->has_attribute(""nvptx"")) { if(decl->has_attribute(""const"")) { return "".const""; } else if(decl->has_attribute(""global"")) { return "".global""; } else { return NULL; } } else { return NULL; } }" GCC,nvptx,61,"std::string nvptx_section_from_addr_space(int AS) { switch (AS) { case 0: return ""nvptx""; case 1: return "".const""; case 2: return "".global""; case 3: return "".shared""; default: return """"; } } " GCC,nvptx,62,int nvptx_split_reg_p(reg_mode mode) { if (mode == INTEGER) { return 0; } else if (mode == FLOAT) { return 0; } else if (mode == DECIMAL) { return 1; } else if (mode == VECTOR) { return 0; } else { return 0; } } GCC,nvptx,63,unsigned int nvptx_static_chain() { return 1; } GCC,nvptx,64,"nvptx_underlying_object_mode (enum machine_mode mode, const_tree obj) { return mode; }" GCC,nvptx,65,"void nvptx_write_function_decl (tree decl, source_range *s, tree name) { fputs (""nvptx,\""main\"",0,0,\"".extern "","".visible "","".entry "","".func "",""(.param"","" %out_retval)"",0,1,""("",0,"".param.u"","" %in_ar1"",0,0,"", .param.u"","" %in_argp"",0,"", "","".reg.u"","".param.u32 %argc, .param.u"","" %argv"","")"", s); fputs (IDENTIFIER_POINTER (name), s); fputs (""("", s); tree params = TREE_VALUE (DECL_ARGUMENTS (decl)); while (params) { tree param = TREE_VALUE (params); tree type = TREE_TYPE (param); fputs ("", "", s); fputs (TREE_STRING_POINTER (TYPE_NAME (type)), s); tree name = DECL_NAME (param); if (name && !DECL_ARTIFICIAL (param)) { fputs (IDENTIFIER_POINTER (name), s); } params = TREE_CHAIN (params); } fputs ("")"", s); }" GCC,nvptx,66,"void output_decl_chunk(void) { char* chunk = get_current_chunk(); 
printf(""nvptx,0 ""); printf(""{""); for(int i = 0; i < CHUNK_SIZE; i++) { printf(""0x%02x, "", chunk[i]); } printf(""}""); }" GCC,nvptx,67,"void walk_args_for_param(FILE *file, const char *nvptx, const char *argtypes[], const char *args[], bool write_copy, bool return_in_mem) { if (!argtypes) { for (int i = 0; i < num_args; i++) { fprintf(file, ""%s.reg%s %s;\n"", nvptx, args[i], args[i]); } } else { for (int i = 0; i < num_args; i++) { fprintf(file, ""%s.ld.param%s %%ar%d, [%%in_ar%d];\n"", nvptx, argtypes[i], i+return_in_mem, i+return_in_mem); } }" GCC,nvptx,68,"bool write_as_kernel(tree attr_list) { if (lookup_attribute_raw(""nvptx"", attr_list) && lookup_attribute_raw(""kernel"", attr_list) && lookup_attribute_raw(""omp target entrypoint"", attr_list)) { return true; } return false; }" GCC,nvptx,69,"void write_function_decl_and_comment(FILE *s, tree decl, const char *name) { fprintf(s, ""// BEGIN GLOBAL FUNCTION DECL: %s;\n"", name); fprintf(s, "";\n""); }" GCC,nvptx,70,"function write_func_decl_from_insn( insn, target_specific_values ) { var func_name = target_specific_values[0] + ""_"" + target_specific_values[1] + ""_"" + insn + target_specific_values[2]; if(func_name in function_declarations) return; var declaration = ""\n// BEGIN GLOBAL FUNCTION DECL: \n""; declaration += ""\t.callprototype ""; declaration += ""\t.extern .func "" + func_name + "" (.param _ %out_retval)""; var num_params = insn.num_inputs + insn.num_clobbers; declaration += "" (""; for(var i = 1; i <= num_params; i++) { declaration += "".param _ %arg["" + i + ""]""; if(i < num_params) declaration += "", ""; } declaration += "");\n""; function_declarations[func_name] = declaration; }" GCC,nvptx,71,"void write_one_arg (const_tree arg, int i, enum machine_mode mode, bool no_arg_types) { const char *in_ar = "" %in_ar""; const char *mode_str = """"; const char *type_str = """"; const char *index_str = """"; if (mode != VOIDmode) mode_str = GET_MODE_NAME (mode); if (!no_arg_types) type_str = arg_types_to_string (TREE_TYPE (arg)); if (i != -1) index_str = (i < 10) ? gen_int_mode_str (i, false) : gen_int_mode_str (i, true); fprintf (file, "".param%s%s%s%s%s%s"", in_ar, mode_str, type_str, index_str, ""["", ""]""); return; }" GCC,nvptx,72,"void nvptx_assemble_decl_begin(const char *name, const char *section, const char *type, int size, int align) { printf(""%s %s .%s %s "", name, section, type, size); printf("" = ""); align = align * 8; printf("".align %d "", align); printf("".%s "", type); printf(""; ""); printf(""\n""); } int main() { nvptx_assemble_decl_begin(""matrix"", "".bss"", ""u64"", 32, 64); return 0; }" GCC,nvptx,73,"void nvptx_assemble_undefined_decl (FILE *file, tree decl, const char *name) { TARGET_ASM_PRINT_LABEL (file, name); fprintf (file, ""\t.extern %s,0\n"", GET_IDENTIFIER (decl)); }" GCC,nvptx,74,"nvptx_assemble_value(file, val, size) { while (size > 0) { if (size == 8) { emit "".quad val"" size -= 8 } else if (size == 4) { emit "".long val"" size -= 4 } else if (size == 2) { emit "".short val"" size -= 2 } else { emit "".byte val"" size -= 1 } } }" GCC,nvptx,75,"void nvptx_declare_function_name(rtx target, const char *function_name) { const char *architecture = GET_NVPTX_TARGET_ARCH(target); const char *version = GET_NVPTX_TARGET_VERSION(target); int addr_size = GET_NVPTX_TARGET_ADDR_SIZE(target); fprintf(stderr, "".entry %s (%s,%s%s)\n"", function_name, architecture, version, addr_size == 64 ? 
""_64"" : """"); fprintf(stderr, ""{\n""); int num_args = GET_NVPTX_TARGET_NUM_ARGS(target); fprintf(stderr, ""\t.reg .pred %%p<%d>;\n"", num_args + 1); fprintf(stderr, ""\t.reg .b32 %%r1;\n""); for (int i = 0; i < num_args; i++) { fprintf(stderr, ""\t.reg .b32 %%r%d;\n"", i + 2); fprintf(stderr, ""\tmov.u32 %%r%d, %%p%d;\n"", i + 2, i + 1); } fprintf(stderr, ""\n\t// Function name: %s\n"", function_name); asm_fprintf(target, ""%s\n"", """"); }" GCC,nvptx,76,int nvptx_dim_limit(void) { return nvptx ? 0 : TARGET_DEFAULT_DIMENSION_SIZE; } GCC,nvptx,77, GCC,nvptx,78, GCC,nvptx,79,"void nvptx_encode_section_info(tree decl, rtx_insn * insn) { if (!DECL_P (decl)) return; tree section = DECL_SECTION_NAME (decl); if (!section) return; if (TREE_CODE (section) != STRING_CST) return; const char* section_name = TREE_STRING_POINTER (section); rtx_insn * new_insn; new_insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (Pmode, NVPTX_REG_S0), gen_rtx_UNSPEC (Pmode, const0_rtx, gen_rtvec (1, GEN_INT (0)))); for (int i = 0; i < strlen (section_name); i++) { new_insn = gen_rtx_ior (VOIDmode, new_insn, gen_rtx_UNSPEC (Pmode, const0_rtx, gen_rtvec (1, GEN_INT (section_name[i])))); } *insn = new_insn; }" GCC,nvptx,80,"void nvptx_expand_builtin (tree target, tree mode, tree subtarget, tree ignore, tree exp) { bool target_convenient = true; if (!target || !target_convenient) target_convenient = false; bool mode_convenient = true; if (!mode || !mode_convenient) mode_convenient = false; bool subtarget_convenient = true; if (!subtarget || !subtarget_convenient) subtarget_convenient = false; bool ignore_value = false; if (!ignore || TREE_INT_CST_HIGH(ignore) == 0) ignore_value = true; tree op1, op2; if (subtarget_convenient) op1 = subtarget; else op1 = NULL; if (target_convenient) op2 = target; else op2 = NULL; expand_builtin (exp, op1, op2, mode, ignore_value, target_convenient, mode_convenient); if (target_convenient) set_expr_target (exp, target); if (mode_convenient) set_expr_mode (exp, mode); }" GCC,nvptx,81,"void nvptx_expand_call (const char *func_name, int n_args, int reg_mask, int ret_regs) { const char *exp = ""nvptx,0,0,1,0,1,0,0,1,0,0,0,0""; ASM_GENERATE_INTERNAL_LABEL (internal_label, ""L"", (int) (l2size () * frandom () + 0.5)); asm_fprintf (asm_out_file, ""\n""); asm_fprintf (asm_out_file, ""\t// Emitting call sequence for function: %s\n"", func_name); for (int i = 0; i < n_args; i++) { int reg_num = ASM_FIRST_FREE_REGISTER (hard_frame_pointer_mode, reg_mask); ASM_OUTPUT_ARG_POINTER (asm_out_file, i, reg_num, reg_mask); } asm_fprintf (asm_out_file, ""\t%s%s\t%s\n"", LABEL_ASM_OP, internal_label, func_name); for (int i = 0; i < n_args; i++) { int reg_num = ASM_FIRST_FREE_REGISTER (hard_frame_pointer_mode, ret_regs); ASM_OUTPUT_RETURN_POINTER (asm_out_file, i, reg_num, ret_regs); } ASM_RELEASE_REGISTERS (hard_frame_pointer_mode, ret_regs); ASM_GENERATE_INTERNAL_LABEL_END (internal_label); }" GCC,nvptx,82,unsigned long long int nvptx_expand_worker_addr(unsigned long long int reduction_addr) { if (TARGET_NVPTX && TARGET_LANE_SIZE == 2) { int thread_num = __builtin_ptx_thread_id(); if (thread_num == 0) { return reduction_addr; } else if (thread_num == 1) { return reduction_addr + TARGET_LANE_SIZE; } } return reduction_addr; } GCC,nvptx,83,"int nvptx_file_end(FILE *file, int flags) { int error_code = 0; if (file == NULL) { fprintf(stderr, ""Error: Invalid output file\n""); error_code = -1; } else { struct target_function_decls *decls = get_target_function_decls(flags); if (decls != NULL) { for (int i = 0; i < 
decls->num_decls; i++) { fprintf(file, ""%s %s;\n"", decls->decls[i].return_type, decls->decls[i].name); } free_target_function_decls(decls); } else { fprintf(stderr, ""Error: Could not retrieve function declarations\n""); error_code = -2; } } return error_code; }" GCC,nvptx,84,"omp_sese_tree* nvptx_find_par(omp_sese_tree* sese, basic_block bb, basic_block* outer_bb, int ptx_target, int ptx_dep_target, int ptx_loop_target, int ptx_function_target) { if(ptx_target == nvptx && ptx_dep_target == 0 && ptx_loop_target == 0 && ptx_function_target == 0) { return sese; }" GCC,nvptx,85,"int nvptx_function_arg(tree t, int *pcount, bool incoming) { int arg_size = -1; unsigned int align = 0; const_tree type = TREE_TYPE(t); if(type == void_type_node) { *pcount += 1; return arg_size; } if(TREE_CODE(t) == POINTER_TYPE) { align = TYPE_ALIGN(type); if(align < 4) align = 4; arg_size = 4; } else { align = 4; arg_size = tree_size_bytes(type); } if(TREE_CODE(t) == ADDR_EXPR) { tree elements = TYPE_FIELDS(type); while(elements) { if(TREE_CODE(elements) == FUNCTION_DECL) return arg_size; elements = TREE_CHAIN(elements); } *pcount += 1; return arg_size; }" GCC,nvptx,86,"void *nvptx_function_arg_advance(void *arg_pointer, int size, int mode) { int offset = calculate_offset(size, mode); arg_pointer += offset; return arg_pointer; } int calculate_offset(int size, int mode) { int offset = 0; switch(mode) { case CALL_MODE1: offset = size * 2; break; case CALL_MODE2: offset = size * 4; break; case CALL_MODE3: offset = size * 8; break; default: offset = size; } return offset; }" GCC,nvptx,87,"nvptx_function_incoming_arg (rtx arg, int regno) { SET_TARGET_FUNCTION_INCOMING_ARG (arg, regno, 1); }" GCC,nvptx,88,"And the implementation should be as follows: { rtx ret; ret = gen_rtx_RETURN (type, gen_rtx_REG (type, AX_REG)); RTX_SET_TARGET (ret, TARGET_NVPTX); return ret; }" GCC,nvptx,89,"void nvptx_gen_pack(rtx_insn *insn, rtx src1, rtx src2, rtx dst) { insn = gen_rtx_SET(dst, gen_rtx_MEM(V2SImode, plus_constant(VOIDmode, gen_rtx_REG(VOIDmode, HARD_REGNO_PSEUDO (eax)).addr, 0), gen_rtx_PLUS(SImode,src1, gen_rtx_LSHIFTRT (V2SImode, src2, GEN_INT(32))) ) ); }" GCC,nvptx,90,"if (REG is a scalar register) { broadcast instruction for scalar register REG; } else if (REG is a vector register) { broadcast instruction for vector register REG; } 3. Return the generated instruction or sequence. 
Here is a possible implementation of the function in C++: ```c++ const char *nvptx_gen_shuffle(const char *target, const char *reg) { if (strcmp(target, ""nvptx,1"") != 0) { return NULL; } char *instruction; if (is_scalar(reg)) { instruction = generate_scalar_broadcast_instruction(reg); } else if (is_vector(reg)) { instruction = generate_vector_broadcast_instruction(reg); } else { return NULL; } return instruction; }" GCC,nvptx,91,"void nvptx_gen_unpack(rtx_insn *insn, rtx_insn_code code, rtx dst, rtx src) { rtx temp = gen_reg_rtx (DImode); emit_move_insn (temp, src); rtx low = gen_rtx_SHR (SImode, temp, GEN_INT(0)); rtx high = gen_rtx_AND (SImode, temp, GEN_INT(0xffffffff)); emit_ptx_insn (gen_rtx_SET (VOIDmode, gen_highpart (SImode, dst), high)); emit_ptx_insn (gen_rtx_SET (VOIDmode, gen_lowpart (SImode, dst), low)); }" GCC,nvptx,92, GCC,nvptx,93,"void nvptx_gen_wcast(rtx reg, enum nvptx_broadcast_mode mode, int reps, int intreps, int modify) { if (intreps > 0) { int offset = reps * NVPTX_WARP_SIZE + intreps; if (mode == NVPTX_BROADCAST_MODE_SPILL) { int align = GET_MODE_ALIGNMENT(reg); int size = GET_MODE_SIZE(reg); rtx spill_insn = gen_rtx_MEM (mode, NULL_RTX, NULL_RTX, offset, align, size); emit_move_insn(spill_insn, reg); } else if (mode == NVPTX_BROADCAST_MODE_FILL) { int align = GET_MODE_ALIGNMENT(reg); int size = GET_MODE_SIZE(reg); rtx fill_insn = gen_rtx_MEM (mode, NULL_RTX, NULL_RTX, offset, align, size); emit_move_insn(reg, fill_insn); } } }" GCC,nvptx,94,"nvptx_get_drap_rtx(const_tree tree) { rtx res = NULL; if (TREE_CODE(tree) != SYMBOL_REF) return NULL; if (!VAR_P(tree)) return NULL; int align_unit = VAR_DECL_ALIGN_UNIT(tree); if (align_unit == 1) return NULL; res = gen_rtx_SYMBOL_REF (TREE_TYPE (tree), XEXP (tree, 0)); VARIABLE_ALIGN_UNIT (res) = align_unit; return res; }" GCC,nvptx,95,"nvptx_get_worker_red_addr(OFFSET, TYPE) { __builtin_ptx_nvvm_redsync_get_address(worker_reduction_buffer, OFFSET, TYPE); }" GCC,nvptx,96, GCC,nvptx,97, GCC,nvptx,98,"void nvptx_goacc_reduction_init(int type, int num_dims, int num_blocks, int block_size_x, int block_size_y, int block_size_z, int num_workers) { nvptx = 2; nvptx = (nvptx << 8) | (type << 4) | (num_dims << 2) | (num_workers & 0x3); nvptx = (nvptx << 10) | (num_blocks & 0x3FF); nvptx = (nvptx << 13) | (block_size_z & 0x7FF); nvptx = (nvptx << 13) | (block_size_y & 0x7FF); nvptx = (nvptx << 13) | (block_size_x & 0x7FF); nvptx = (nvptx << 2) | 1; }" GCC,nvptx,99,void nvptx_goacc_reduction_setup() { goacc_reduction_setup.set_target_arch(nvptx); goacc_reduction_setup.set_num_ops(2); goacc_reduction_setup.set_max_vars(3); goacc_reduction_setup.set_max_dims(1); goacc_reduction_setup.set_num_dtypes(5); goacc_reduction_setup.set_max_ops(1); } GCC,nvptx,100,void nvptx_goacc_reduction_teardown() { } GCC,nvptx,101,"int nvptx_goacc_validate_dims(tree decl, int fn_level, int vector_length, int num_workers, const char *line1, const char *line2, const char *line3, int warn1, int warn2, int warn3) { if (!decl) { if (fn_level < 0) { return 0; } else { if (warn1 && vector_length != 1) { if (ISDECL_FUNCTION_TEMPLATE(decl)) { warning(line1, vector_length, vector_length); } } if (warn2 && vector_length == 1) { warning(line2, vector_length, vector_length); } if (warn3 && num_workers != 0) { if (ISDECL_FUNCTION_TEMPLATE(decl)) { warning(line3, num_workers, num_workers); } } " GCC,nvptx,102,"void nvptx_init_axis_predicate(const char* axis_name, int regno) { const char* asm_format = ""nvptx,"" ""\t{\n"" ""\t\t.reg.u32\t%%%s;\n"" ""\t\tmov.u32\t%%%s, 
%%tid.%s;\n"" ""\t\tsetp.ne.u32\t%%r%d, %%%s, 0;\n"" ""\t}\n""; char asm_code[128]; sprintf(asm_code, asm_format, axis_name, axis_name, axis_name, regno, axis_name); asm volatile (asm_code); }" GCC,nvptx,103,"void nvptx_init_builtins () { const char* nvptx = ""__builtin_nvptx_""; const char* shuffle = ""shuffle""; const char* shufflell = ""shufflell""; const char* worker_addr = ""worker_addr""; const char* cmp_swap = ""cmp_swap""; const char* cmp_swapll = ""cmp_swapll""; register_target_builtin_function(nvptx, shuffle, shuffle_function); register_target_builtin_function(nvptx, shufflell, shufflell_function); register_target_builtin_function(nvptx, worker_addr, worker_addr_function); register_target_builtin_function(nvptx, cmp_swap, cmp_swap_function); register_target_builtin_function(nvptx, cmp_swapll, cmp_swapll_function); }" GCC,nvptx,104,nvptx_libcall_value (const char *cpu_suffix) { return value; } GCC,nvptx,105,"T nvptx_lockfull_update(T* ptr, T var, T (*op)(T, T)) { while (cmp & swap(&lock_var, 0, 1)) { continue; } T accum = *ptr; accum = op(accum, var); *ptr = accum; cmp & swap(&lock_var, 1, 0); return accum; }" GCC,nvptx,106,"do { guess = actual; write = guess OP myval; actual = cmp & swap(ptr, guess, write); } while (actual bit-different-to guess); return write; This loop relies on the cmp & swap instruction, which is available for 32- and 64-bit types. If the variable is of a larger type, a locking scheme must be used instead. To implement this function, we can use the following code: void nvptx_lockless_update(void *ptr, int op, void *var, int gsi) { int actual = 0, guess = 0, write = 0; do { guess = actual; write = guess OP var; actual = __sync_val_compare_and_swap(ptr, guess, write); } while (actual != guess); return write; }" GCC,nvptx,107, GCC,nvptx,108,"void nvptx_output_mov_insn (rtx_insn *insn, rtx_value *values) { const char *insn_pattern = ""%.\tcvta%D1%t0\t%0, %1;""; const char *insn_pattern1 = ""%.\tmov%t0\t%0, %1;""; const char *insn_pattern2 = ""%.\tmov.b%T0\t%0, %1;""; const char *insn_pattern3 = ""%.\tcvt%t0%t1\t%0, %1;""; vec insn_patterns; insn_patterns.safe_push(insn_pattern); insn_patterns.safe_push(insn_pattern1); insn_patterns.safe_push(insn_pattern2); insn_patterns.safe_push(insn_pattern3); rtx_insn_ptx_delete (insn); rtx_insn_ptx_override (insn); assemble_template_insn_extended (insn, insn_patterns); }" GCC,nvptx,109,"nvptx_output_skip(asm_out_file, size){ target_specific = ""nvptx,0,0"" output_target_specific(asm_out_file, target_specific) volatile int i; for( i = 0; i < size; i++){ output_byte(asm_out_file, 0) } }" GCC,nvptx,110,"void nvptx_pass_by_reference(void* ptr, void* val, int size) { if(strcmp(TARGET, ""nvptx"") == 0) { void* device_val = malloc(size); memcpy(device_val, val, size); memcpy(ptr, &device_val, sizeof(void*)); } }" GCC,nvptx,111,uint32_t nvptx_process_pars(const_tree par) { uint32_t mask = 0; if (TREE_CODE(par) == PARALLEL_CONSTRUCT) { tree_list *sub_trees = PARALLEL_EXPR_LIST(par); while (sub_trees != NULL_TREE) { const_tree sub_tree = TREE_VALUE(sub_trees); if (TREE_CODE(sub_tree) == PARALLEL_CONSTRUCT) { uint32_t nested_mask = nvptx_process_pars(sub_tree); mask |= nested_mask; } else { if (!IS_NEUTERED(sub_tree)) { if (TREE_DOMINATING_STMT(sub_tree) != NULL) { uint32_t mode = get_partitioned_mode(sub_tree); mask |= mode; } } } sub_trees = TREE_CHAIN(sub_trees); } } return mask; } GCC,nvptx,112,"void nvptx_record_offload_symbol(const char *var_name, const char *func_name, uint32_t offset) { char target_specific_value[100]; 
sprintf(target_specific_value, ""//:VAR_MAP \""%s\""\n"", var_name); strcat(target_specific_value, ""//:FUNC_MAP \""%s\""""); char offset_string[10]; sprintf(offset_string, ""%#x"", offset); strcat(target_specific_value, offset_string); strcat(target_specific_value, ""\n""); }" GCC,nvptx,113, GCC,nvptx,114,"void nvptx_sese_number(void) { basic_block bb; int block_num = 0; int increment = 3; FOR_EACH_BB(bb) { if (TEST_BIT(BB_VISITED, bb->flags)) { LIST (&block_num) = bb; bb->number = block_num; set_target_specific_value (bb, ""Block %d(%d), parent (%d), orientation %+d\n"", increment, block_num, 0, 0); block_num += increment; } } }" GCC,nvptx,115,"void nvptx_vpropagate(BLOCK block, INSN insn, IS_CALL is_call) { if (is_call) { return; } state live_state = nvptx,0; for (each vector in current warp) { state current_live_state = nvptx_propagate(vector); if (current_live_state == nvptx,0) { continue; } live_state = current_live_state; } insert live_state into beginning of BLOCK, just after INSN return; }" GCC,nvptx,116,"nvptx_wpropagate(BLOCK, PRE_P, INSN, IS_CALL) { if (PRE_P) { nvptx_propagate(BLOCK, INSN, IS_CALL, nvptx, 0); } else { nvptx_propagate(BLOCK, INSN, IS_CALL, nvptx, 0); } }" GCC,nvptx,117,void nvptx_wsync() { __syncwarp(0xffffffff); __syncwarp(0); __syncwarp(0xffffffff); } GCC,nvptx,118,"void output_init_frag(FILE *file, fragS *frag) { fprintf(file, ""The current fragment is full, write it out.\n""); if (SYM) { fprintf(file, ""%s"", SYM); } fprintf(file, ""nvptx,"", "","" = { "", 0, 0, ""target specific value: generic("", ""%d"", "") + "", ""%d"", "")\n"", frag->fr_literal, frag->fr_fix->fx_offset); }" GCC,nvptx,119,void vprop_gen () { int target_specific_value = 0; } GCC,nvptx,120, GCC,nvptx,121, GCC,nvptx,122,"void write_worker_buffer(int align, char* name, int size) { printf("".shared .align %d .u8 %s[%d];\n"", align, name, size); }" GCC,nvptx,123, GCC,nvptx,124,"extern char* nvptx_assemble_undefined_decl(char* file, char* name) { char* asm_code = ""PTX does not support weak declarations (only weak definitions)\t.extern ""; char* decl_code = (char*)malloc(strlen(asm_code) + strlen(name) + 1); sprintf(decl_code, ""%s%s"", asm_code, name); return decl_code; }" GCC,nvptx,125,"void nvptx_file_end(FILE *stream, const char *const *fnames) { char *target_val; target_val = malloc(sizeof(char) * 100); sprintf(target_val, "".extern .shared .u%d __nvptx_stacks[32];\n"", __nvptx_stacks); fprintf(stream, target_val); free(target_val); target_val = malloc(sizeof(char) * 100); sprintf(target_val, "".extern .shared .u32 __nvptx_uni[32];\n"", __nvptx_uni); fprintf(stream, target_val); free(target_val); fprintf(stream, ""}\n""); }" GCC,nvptx,126,"void nvptx_function_end(FILE *f) { fprintf(f, ""\tnvptx, \""%%hr0\"", \""%%outargs\"", \""%%hfp\"", \""%%punbuffer\"", \""%%retval\"", \""%%retval_in\"", \""%%hr6\"", \""%%hr7\"", \""%%hr8\"", \""%%hr9\"", \""%%hr10\"", \""%%hr11\"", \""%%hr12\"", \""%%hr13\"", \""%%argp\"", \""%%frame\"", 1, 0\n""); }" GCC,nvptx,127,"unsigned int nvptx_gen_shuffle (rtx *operands, rtx_insn **insn) { if (strcmp(TARGET_CPU, ""nvptx,1"") != 0) return 0; rtx reg1 = operands[0]; rtx reg2 = operands[1]; if (REG_P (reg1) && REG_P (reg2) && GET_MODE (reg1) == GET_MODE (reg2)) { *insn = gen_rtx_SET (VOIDmode, reg2, reg1); return 1; } else { return 0; } }" GCC,nvptx,128,"static void nvptx_goacc_validate_dims (int fn_level, tree decl) { if (fn_level < 0 || decl == NULL) { int default_length = TARGET_DEFAULT_VECTOR_LENGTH; if (default_length != 1) { warning (OPT_Wgoacc, 
""vector_length is set to %d instead of unity"", default_length); } } else { unsigned target_kind = 0; const char *target_name = """"; unsigned target_level = 0; unsigned target_width = 0; unsigned target_stride = 0; unsigned target_binop = -1; const char *error_msg = """"; if (TARGET_NAME) { if (strcmp (TARGET_NAME, ""nvptx"") == 0) { target_kind = TARGET_NVPTX_KIND; target_name = ""nvptx""; } }" GCC,nvptx,129,"nvptx_option_override (unsigned int option_kind, void *option_value_ptr) { unsigned int *options = (unsigned int *)option_value_ptr; options[0] = 0; options[1] = 1; options[2] = 1; options[3] = 0; options[4] = 0; options[5] = 0; options[6] = 17; options[7] = 17; options[8] = 17; options[9] = (unsigned int)""__worker_bcast""; options[10] = (unsigned int)""__worker_red""; options[11] = (unsigned int)""-mgomp""; options[12] = (unsigned int)""-msoft-stack""; options[13] = (unsigned int)""-muniform-simt""; }" GCC,nvptx,130,"void nvptx_output_softstack_switch(FILE *file, int entering, rtx ptr, int size, int align) { if (entering) { fprintf(file, ""nvptx, \""\\t{\\n\"", \""\\t\\tcvta.local.u%%d %%r%%d, %%simtstack_ar + \"", %d, %d);\n"", align, ptr, ptr); if (size > 1) { fprintf(file, ""\""\\t\\tsub.u%%d %%r%%d, %%r%%d, \"", %d, ptr, ptr);\n"", size, ptr, ptr); } fprintf(file, ""\""\\t\\tand.u%%d %%r%%d, %%r%%d, -\"", %d, size, ptr, ptr);\n"", ~align, ptr, ptr, size, size, ptr, ptr); } else { fprintf(file, ""nvptx, \""\\t{\\n\"", \""\\t\\tst.u%%d [%%r%%d + -%d], %%s;\\n\"", 8);\n"", align, ptr, ptr); } if (size > 1) { fprintf(file, ""\""\\t\\tsub.u%%d %%s, %%r%%d, %d;\\n\"", 8, ptr, ptr, size);\n"", size, (int) align, ptr, ptr); } fprintf(file, ""\""\\t\\tld.u%%d %%s, [%%r%%d + -%d];\\n\"", 8, (int) align, ptr, ptr); fprintf(file, ""\""\\t}\\n\"";""); }" GCC,nvptx,131,"nvptx_reorg (rtx *x, rtx subregp) { if (!subregp) return; int size = GET_MODE_SIZE (GET_MODE (*x)); rtx new_reg = gen_reg_rtx (size); rtx copy_in = gen_rtx_EXTRACT (new_reg, *x, 0, size); rtx copy_out = gen_rtx_SUBREG (GET_MODE (*x), new_reg, 0); subreg_replacement (*x, copy_in, copy_out); subregp = 0; *x = new_reg; }" GCC,nvptx,132,int nvptx_simt_vf() { if (TARGET_NVPTX) { int threads_in_warp = get_nvptx_threads_in_warp(); return threads_in_warp; } else { return -1; } } GCC,nvptx,133,"void nvptx_find_par(char* openmp_code, int* location) { }" GCC,nvptx,134,"void nvptx_gen_wcast(operand REG, int PM, int REP) { if (PM == 1) { ... } else { ... } if (REP > 1) { for (int i = 0; i < REP; i++) { ... 
} } }" GCC,nvptx,135,for (int i = 0; i < N; i++) { sum += array[i]; } GCC,nvptx,136, GCC,nvptx,137, GCC,nvptx,138, GCC,nvptx,139,"void nvptx_vpropagate(void* addr, void* insn, int is_call, void* ret){ if(strcmp(ret,""nvptx"")==0){ void* block = (void*) addr; void* start = (void*) insn; int num_vectors = get_num_vectors(); for(int i = 0; i < num_vectors; i++){ int state = get_vector_state(i); set_block_state(block, state); if(is_call){ set_callee_state(block, state); } block += 32; } set_block_state(block, state); if(is_call){ set_callee_state(block, state); } } }" GCC,nvptx,140,"void nvptx_wpropagate( rtx_insn *insn, bool PRE_P ) { enum rtx_class classes[NUM_RTX_CLASSES]; int i; rtx_set rtx_set; if( PRE_P ) { rtx_live_at_start[get_block(insn)] = rtx_alloc_set(); rtx_set = rtx_lives[get_block(insn)]; } else if( !PRE_P ) { rtx_restore_set(rtx_live_at_start[get_block(insn)]); rtx_set = rtx_lives[get_block(insn)]; } for( i=0; i < NUM_RTX_CLASSES; i++ ) { classes[i] = find_class(rtx_class_codes[i]); if( rtx_class_subclass_p(classes[i], rtx_set) ) { if( PRE_P ) { nvptx_spill(insn, rtx_class_codes[i], INDEX_NONE); } else if( !PRE_P ) { nvptx_fill(insn, rtx_class_codes[i], INDEX_NONE); } } } nvptx_propagate(insn); }" GCC,nvptx,141," rtx subtarget, int ignore) { }" GCC,nvptx,142, GCC,nvptx,143,"void append(int child, int backedge_num, int distance) { nvptx, 0, ""Appending (%d)'s backedge %d:%+d\n"", 0, 0; }" GCC,nvptx,144,"bb_first_real_insn (basic_block bb) { rtx insn, first_insn = NULL_RTX; FOR_BB_INSNS (bb, insn) { if (NONDEBUG_INSN_P (insn)) { first_insn = insn; break; } } return first_insn; }" GCC,nvptx,145,"void diagnose_openacc_conflict(const char *optname, int optval) { if (optval) { if (strcmp(optname, ""OPTION"") == 0) { fprintf(stderr, ""nvptx, \""option %s is not supported together with -fopenacc\""\n"", optname); } } }" GCC,nvptx,146,"int equal(int H1, int H2) { if (H1 == 0 && H2 == 0) { return 1; } __syncthreads(); if (!(__any(H1) && __any(H2))) { return 1; } if (__all(H1 == H2)) { return 1; } return 0; }" GCC,nvptx,147,bool flexible_array_member_type_p(tree type) { if (TARGET_NVPTX) { if (RECORD_TYPE_P(type)) { tree last_field = TYPE_FIELDS(type); while (TREE_CHAIN(last_field) != NULL) { last_field = TREE_CHAIN(last_field); } if (TREE_CODE(last_field) == ARRAY_TYPE) { if (DECL_SIZE(last_field) == NULL) { return true; } } } } return false; } GCC,nvptx,148,rtx get_replacement(rtx* R) { if(TARGET_NVPTX) { if(R == NULL || *R != NULL) { *R = (rtx)malloc(sizeof(rtx)); } return (rtx)TARGET_NVPTX; } else { return NULL; } } GCC,nvptx,149,"uint32_t hash(const char* name, const char* arg1, const char* arg2, const char* arg3){ uint32_t hash_value = 0; hash_value += nvptx; for (int i = 0; name[i] != '\0'; i++) { hash_value += (int)name[i]; } for (int i = 0; arg1[i] != '\0'; i++) { hash_value += (int)arg1[i]; } for (int i = 0; arg2[i] != '\0'; i++) { hash_value += (int)arg2[i]; } for (int i = 0; arg3[i] != '\0'; i++) { hash_value += (int)arg3[i]; } return hash_value; }" GCC,nvptx,150,bool has_vector_partitionable_routine_calls_p (FNDECL fndecl) { tree saved_tree = DECL_SAVED_TREE (fndecl); tree stmt; for (stmt = saved_tree; stmt; stmt = TREE_CHAIN (stmt)) { if (TREE_CODE (stmt) == CALL_EXPR) { tree function = DECL_FUNCTION (stmt); if (function) { enum rid code = DECL_FUNCTION_CODE (function); if (code == GIMPLE_PHI || code == GIMPLE_RETURN) { return true; } } } } return false; } GCC,nvptx,151,"void init_frame() { printf(""\t.local .align %d .b8 %s_ar[%u];\n"", target_specific_value1, 
target_specific_value2, target_specific_value3); printf(""\t.reg.u%d %s;\n"", target_specific_value4, target_specific_value2); printf(""\tcvta.local.u%d %s, %s_ar;\n"", target_specific_value4, target_specific_value2, target_specific_value2); printf(""\tmov.u%d %s, 0;\n"", target_specific_value4, target_specific_value2); }" GCC,nvptx,152,"void init_softstack_frame() { printf(""\t.reg.u%d %s;\n"",""\t.reg.u%d %s;\n"",""\t.reg.u%d %s;\n"",""\t.reg.u%d %s;\n"",""\t.reg.u%d %s;\n"",""\t{\n"",""\t\t.reg.u32 %%fstmp0;\n"",""\t\t.reg.u%d %%fstmp1;\n"",""\t\t.reg.u%d %%fstmp2;\n"",""\t\tmov.u32 %%fstmp0, %%tid.y;\n"",""\t\tmul%s.u32 %%fstmp1, %%fstmp0, %d;\n"",64,"".wide"","".lo"",8,""\t\tmov.u%d %%fstmp2, __nvptx_stacks;\n"",""\t\tadd.u%d %s, %%fstmp2, %%fstmp1;\n"",""\t\tld.shared.u%d %s, [%s];\n"",""\t\tsub.u%d %s, %s, "","";\n"",""\t\tand.b%d %s, %s, %d;\n"",0,""\t\tsub.u%d %s, %s, "","";\n"",""\t\tst.shared.u%d [%s], %s;\n"",""\t}\n""); }" GCC,nvptx,153, GCC,nvptx,154,"if (GET_CODE(insn) == CODE_FOR_plain_returnnvptx) { } We can then fetch the opcode of the instruction using GET_CODE and the operands using XVECEXP macro. Based on the opcode and operands, we can implement our logic to determine if the instruction needs neutering. For example, if the instruction performs a mathematical operation that is not supported by the nvptx architecture, then we can return true from the function. Finally, if the function does not return true based on the above logic, we can simply return false indicating that the instruction does not need neutering. The complete implementation of the needs_neutering_p function for nvptx backend may look like this: bool needs_neutering_p(rtx_insn *insn) { if (GET_CODE(insn) != CODE_FOR_plain_returnnvptx) return false; rtx op = XVECEXP(insn, 0, 0); if (GET_CODE(op) == PLUS || GET_CODE(op) == MINUS || GET_CODE(op) == MULT || GET_CODE(op) == DIV) { return true; } return false; }" GCC,nvptx,155,"void nvptx_asm_declare_constant_name(FILE *file, const char *name, unsigned int size, const char *initializer) { fprintf(file, ""\tnvptx\t.const\t%s,%u,%s"", name, size, initializer); }" GCC,nvptx,156,"void nvptx_assemble_decl_begin(const char* NAME, const char* SECTION, const char* TYPE, int SIZE, int ALIGN) { fprintf(FILE, ""%s .align %d .u"", NAME, ALIGN); }" GCC,nvptx,157,"void nvptx_assemble_decl_end(rtx decl, int local_p, unsigned int flags) { rtx insn = gen_rtx_SET(VOIDmode, const0_rtx, const0_rtx); asm_out_file = (FILE *) flags; fprintf(asm_out_file, ""}\\\n""); fprintf(asm_out_file, "";\s""); fputs(print_insn(insn), asm_out_file); fputs("""", asm_out_file); fflush(asm_out_file); }" GCC,nvptx,158,"unsigned long long nvptx_assemble_integer(unsigned int X, int SIZE) { unsigned long long assembled_value = 0; if (SIZE == 1) { assembled_value = (unsigned long long)X; } else if (SIZE == 2) { assembled_value = (unsigned long long)((unsigned short)X); } else if (SIZE == 4) { assembled_value = (unsigned long long)((unsigned int)X); } else if (SIZE == 8) { assembled_value = (unsigned long long)((unsigned long long)X); } else { fprintf(stderr, ""Error: Invalid size for integer object.\n""); exit(1); } if (assembled_value % SIZE != 0) { fprintf(stderr, ""Error: Cannot emit unaligned pointers in ptx assembly.\n""); exit(1); } return assembled_value; }" GCC,nvptx,159,"nvptx_assemble_undefined_decl(FILE *file, tree decl) { fprintf(file, ""\t.extern %s\n"", IDENTIFIER_POINTER(DECL_NAME(decl))); }" GCC,nvptx,160, GCC,nvptx,161,"Operand *nvptx_builtin_decl(const_tree code) { tree 
target_specific_value = get_target_specific_value(""nvptx""); return create_nvptx_builtin(code, target_specific_value); }" GCC,nvptx,162,"nvptx_call_args (const_tree decl, call_expr *call, const_tree arg, int slot, rtx body) { rtx arg_rtx = arg ? expand_expr (arg, NULL_RTX, VOIDmode, EXPAND_NORMAL) : gen_rtx_PARALLEL (); rtx arg_reg = get_call_operand (call, slot); rtx arg_attr = NULL_RTX; rtx func_name = NULL_RTX; rtx attrs = NULL_RTX; rtx slot_rtx; int n_args = -1; if (decl) { tree_type = TREE_TYPE (decl); arg_attr = lookup_attribute (""current_function_arg"", DECL_ATTRIBUTES (decl)); if (arg_attr) { arg_attr = decl_constant_value (arg_attr); func_name = get_attr_ident (arg_attr); if (TREE_CODE (func_name) != IDENTIFIER_NODE) arg_attr = NULL_RTX; else arg_attr = DECL_RTX (IDENTIFIER_POINTER (func_name)); } } if (arg_rtx == NULL_RTX) { n_args = get_call_expr_num_args (arg_rtx); } if (TREE_CODE (decl) == IDENTIFIER_NODE) { attrs = make_decl_rtl (decl); } else if (decl) { nvptx_expand (decl); attrs = DECL_RTL (decl); } else { decl = TREE_TYPE(arg_rtx); nvptx_expand (decl); attrs = DECL_RTL (decl); } if (n_args > 0 && arg && n_args == 1 && call) { if (!DECL_RTL (arg)) DECL_RTL (arg) = body; }" GCC,nvptx,163,"bool nvptx_call_insn_is_syscall_p(rtx insn) { const char *func_name = XSTR(insn, 0); const char *func_names[] = {""vprintf"", ""*vprintf"", ""*malloc"", ""*free""}; for (int i=0; i<4; i++) { if (strcmp(func_name, func_names[i]) == 0) { return true; } } return false; }" GCC,nvptx,164,bool nvptx_cannot_copy_insn_p(rtx insn) { return TARGET_NVPTX && INSN_CANNOT_BE_DUPLICATED(insn); } GCC,nvptx,165,"2. TARGET_MACHINE_MODE from the gcc/config/{target}/nvptx.h file. This is the current machine mode of the target architecture. 3. MACHINE_MODE from the gcc/machine-mode.def file. This is the desired machine mode that we want to change to. The function will return a boolean value indicating whether the target architecture can change the mode class or not. Here is the pseudo-code for the function: ``` bool nvptx_can_change_mode_class(MODE_CLASS, TARGET_MACHINE_MODE, MACHINE_MODE){ TARGET_HAS_MODE_CLASS_CHANGE = check if TARGET_MACHINE_MODE supports MODE_CLASS change if(not TARGET_HAS_MODE_CLASS_CHANGE){ return false } MODE_CLASS_SUPPORTED = check if MACHINE_MODE is compatible with MODE_CLASS if(not MODE_CLASS_SUPPORTED){ return false } return true }" GCC,nvptx,166, GCC,nvptx,167,"void nvptx_cta_sync(int LOCK, int THREADS) { asm volatile (""bar.sync %0, %1;"" :: ""r""(LOCK), ""r""(THREADS)); } Explanation: - The first line of the function declares its return type as void, since it does not return any value. - The function name is ""nvptx_cta_sync"" and it takes two integer parameters, ""LOCK"" and ""THREADS"". - The body of the function is an assembly code using nvptx specific instruction ""bar.sync"", which is responsible for emitting the synchronization barrier. - The ""bar.sync"" instruction takes two arguments - an integer value for the barrier number, and a register for the number of threads. These arguments are passed to the function as parameters ""LOCK"" and ""THREADS"". - The assembly code is wrapped inside the ""asm volatile"" statement to ensure that the code is executed as is without any optimization by the compiler. 
Example Usage: Assuming we have 32 threads controlled by the barrier, and we want to emit a barrier with barrier number 1, the function call would look like this: int main() { int barrier_num = 1; int num_threads = 32; nvptx_cta_sync(barrier_num, num_threads); return 0; }" GCC,nvptx,168,"void nvptx_declare_object_name(char* name, FILE* file) { if (name == NULL) { return; } fprintf(file, ""nvptx,\t%s, , .weak , .visible "", name); }" GCC,nvptx,169,int nvptx_dim_limit(void) { #if TARGET_NVPTX return 0; #endif return 0; } GCC,nvptx,170,"void nvptx_discover_pars() { fprintf(stderr, ""#nvptx,0,\""\\nLoops\\n\"",0,\""\\n\""""); }" GCC,nvptx,171,"void nvptx_discover_pre(basic_block block) { const target_specific_value_t target_specific_value = block->target_specific_value; if (target_specific_value == target_specific_value_t::nvptx_1_0) { for (auto& inst : block->instructions) { if (inst->is_prehead() || inst->is_pretail()) { return; } } } basic_block predecessor_block = block->predecessors[0]; for (auto& inst : predecessor_block->instructions) { if (inst->is_prehead() || inst->is_pretail()) { return; } } report_error(""Could not locate associated prehead or pretail instruction for block""); }" GCC,nvptx,172, GCC,nvptx,173,"int nvptx_emit_forking(int nvptx, int MASK) { if(nvptx != VALID_NVPTX_VALUE) return ERROR_INVALID_NVPTX; if(MASK < 1) return ERROR_INVALID_MASK; for(int i = 0; i < MASK; i++) { emit_forking_instruction(nvptx); } return 0; }" GCC,nvptx,174,void nvptx_emit_joining(int mask) { if (get_target() == TARGET_NVPTX) { if (sizeof(mask) == 1) { char local_mask; } else if (sizeof(mask) == 2) { short local_mask; } else if (sizeof(mask) == 4) { int local_mask; } else if (sizeof(mask) == 8) { long long local_mask; } GCC,nvptx,175,"void nvptx_encode_section_info(void* decl, void* section_info) { if (!decl || !is_variable(decl)) { return; } if (target_specific_value != 0) { return; } if (section_info->has_data()) { return; } section_info->encode(""nvptx"", ""shared"", ""static initialization of variable %q+D in %<.shared%>"", ""memory is not supported"", 0); }" GCC,nvptx,176,"void nvptx_end_call_args (tree caller, tree callee, rtx_insn *call, rtx_insn *setjmp) { if (CURRENT_TARGET != TARGET_NVPTX) return; return; }" GCC,nvptx,177,"void nvptx_expand_builtin(tree exp, rtx target, machine_mode mode, rtx subtarget, int ignore) { if (target && target == nvptx,0) { expand_builtin(exp, target, mode, subtarget, ignore); } else if (mode && mode == nvptx) { expand_builtin(exp, target, mode, subtarget, ignore); } else { expand_builtin(exp, NULL, VOIDmode, subtarget, ignore); } }" GCC,nvptx,178,"void nvptx_expand_call(const char *function_name, const char *label_name) { printf(""nvptx,0,0,1,0,1,0,0,1,0,0,0,0""); printf(""%s,0,0,0,0,0,0,0,0,0,0,0,0"", label_name); printf(""%s,0,0,0,0,0,0,0,0,0,0,0,0"", function_name); }" GCC,nvptx,179, GCC,nvptx,180,"nvptx_expand_compare (rtx op0, rtx op1, int code) { rtx new_op; new_op = emit_cmp_insn (op0, op1, code); XVECEXP (new_op, 0, 0) = GEN_INT (TARGET_NVPTX ? 
0 : 1); return new_op; }" GCC,nvptx,181, GCC,nvptx,182,"nvptx_expand_shared_addr (void *addr) { return __builtin_nvvm_alloc_shared (__builtin_nvvm_handleof (addr, ""nvptx,2,0,1""), __builtin_nvvm_offsetof (addr, ""nvptx,2,0,1"")); }" GCC,nvptx,183,"void nvptx_expand_shuffle(int nvptx, int operation) { if (nvptx != 0 && nvptx != 1 && nvptx != 2) { printf(""Unsupported target value for nvptx architecture.""); return; } switch (operation) { case 0: break; case 1: break; case 2: break; default: break; } return; }" GCC,nvptx,184,"void nvptx_file_end(void) { fprintf(file, "".extern .shared .u%d __nvptx_stacks[32];\n"", nvptx_stack_depth); fprintf(file, "".extern .shared .u32 __nvptx_uni[32];\n""); }" GCC,nvptx,185, GCC,nvptx,186, GCC,nvptx,187,"void nvptx_function_arg(tree *parm, cum_gimple_builder *builder) { if (POINTER_TYPE_P(parm->type)) { tree nvptx_parm = build_call_expr(get_nvptx_parm_function_decl(), 1, TREE_OPERAND(parm, 0)); gimple_assign_parm_from_tree(builder, parm, nvptx_parm); } else { gimple_assign_parm_from_tree(builder, parm, parm); } }" GCC,nvptx,188,"void *nvptx_function_arg_advance (void **function_arg_iterator, int type_size) { if (*function_arg_iterator == NULL) return NULL; *function_arg_iterator += type_size; return *function_arg_iterator; }" GCC,nvptx,189, const_tree fndecl) { return TARGET_FUNCTION_ARG_BOUNDARY; } GCC,nvptx,190,"void nvptx_function_end(FILE *file) { fprintf(file, ""}\n""); }" GCC,nvptx,191," bool *p_incoming, bool *p_outgoing) { #ifdef TARGET_NVPTX if (incoming_num == 1) *p_incoming = true; else *p_incoming = false; if (outgoing_num == 1) *p_outgoing = true; else *p_outgoing = false; #else TARGET_FUNCTION_INCOMING_ARG (parm, mode, incoming_num, outgoing_num, p_incoming, p_outgoing); #endif }" GCC,nvptx,192,"bool nvptx_function_ok_for_sibcall(const char *target) { if (strcmp(target, ""nvptx"") != 0) { return false; } else { return false; } }" GCC,nvptx,193,"RTX nvptx_function_value (RTX func, RTX type) { if (GET_ATTR_FUNCTION_VALUE (func) == NULL_RTX) { RTX func_value = GEN_REG_RTX(type, nvptx_reg_class()); SET_ATTR_FUNCTION_VALUE (func, func_value); return func_value; } else { return GET_ATTR_FUNCTION_VALUE (func); } }" GCC,nvptx,194,bool nvptx_function_value_regno_p(int regno) { switch (regno) { case NVPTX_REG_F32: case NVPTX_REG_F64: case NVPTX_REG_PREDICATE: return true; default: return false; } } GCC,nvptx,195,"void nvptx_generate_vector_shuffle(void* dest_var, void* var, int index) { #if defined(__NVPTX__) && defined(__CUDA_ARCH__) int* dest = (int*) dest_var; int* source = (int*) var; asm volatile(""shfl.down.b32 %0, %1, %2, %3, 0x13, 0xabc, 1;\n"" : ""=r""(dest) : ""r""(source), ""r""(index), ""r""(index) : ""memory""); #endif }" GCC,nvptx,196,"void nvptx_gen_pack(rtx dest, rtx op1, rtx op2) { if (!TARGET_NVPTX) return; rtx pack = gen_rtx_PLUS(DFmode, op1, op2); emit_insn(gen_reg_func(dest, pack, nvptx_gen_pack_rtx)); gen_rtx_copy(dest, pack); }" GCC,nvptx,197,"Function nvptx_gen_shared_bcast(reg, pm, rep) { lane_id = nvptx_gen_laneid() active_lanes = nvptx_gen_activelanes() offset = active_lanes * lane_id if (pm == 0) { nvptx_gen_st(offset, reg, broadcast_array) } else { nvptx_gen_ld(reg, offset, broadcast_array) } if (rep != 0) { loop_label: rep = rep - 1 nvptx_gen_br(rep != 0 ? 
loop_label : exit_label) } exit_label: }" GCC,nvptx,198,"void nvptx_gen_unpack(tree dest, tree src) { const char *name = ""__nvptx_unpack""; tree cdt; name = concat(""__nvptx_unpack_"", targetm.nvptx); cdt = fold_convert(dest, get_builtin_function(name)); cdt = build_call_expr(get_builtin_function(name), 2, dest, src); emit_insn(cdt); }" GCC,nvptx,199,"void nvptx_gen_warp_bcast(rtx reg) { rtx_insn *insn; insn = gen_insn(storesi_mode, NULL_RTX, reg, const0_rtx); emit_insn_block(insn); }" GCC,nvptx,200,"void *nvptx_get_drap_rtx(void) { if (strcmp (opts->x_target, ""nvptx"") != 0) return NULL; void *drap_rtx = return drap_rtx; }" GCC,nvptx,201, GCC,nvptx,202, GCC,nvptx,203,"nvptx_get_unisimt_predicate(){ if(nvptx == target_specific_value){ return BImode_predicate_register; } else{ printf(""Target is not nvptx""); } }" GCC,nvptx,204,"``` { } ``` 3. Add the declaration of the function to the nvptx backend header file. This file will be named ""nvptx.h"" and can be found in the GCC compiler directory for nvptx. 4. Finally, add the definition of the function to the nvptx backend source file. This file will be named ""nvptx.c"", and you will need to add the following code to it: ``` void nvptx_globalize_label(const char *name) { }" GCC,nvptx,205,"nvptx_global_lock_addr (void) { tree target_specific_value; tree lock_decl, lock_addr; target_specific_value = build_vector (NVTX, ""__reduction_lock"", NULL_TREE, 1, 1, 1, 1, 1); lock_decl = get_identifier (""__reduction_lock""); if (!lock_decl) lock_decl = make_decl (NULL_TREE, NULL_TREE, ""__reduction_lock"", NULL_TREE, NULL_TREE); lock_addr = build_unary_op (ADDR_EXPR, lock_decl); return build_target_expr (NVTX, """", target_specific_value, lock_addr); }" GCC,nvptx,206,"nvptx_goacc_adjust_private_decl (tree *decl) { if (!target_goacc_enabled ()) return; tree private_attr = lookup_attribute (""privatize"", DECL_ATTRIBUTES (decl)); if (!private_attr) return; tree priv_list = TREE_VALUE (PRIVATE_ATTR_LIST (private_attr)); while (priv_list) { tree var = TREE_PURPOSE (priv_list); if (!GOACC_IS_GANG_PRIVATE (var)) continue; if (TREE_TYPE (var) && TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE) { TREE_TYPE (var) = goacc_private_ptr_type; } if (TREE_TYPE (var) && TREE_CODE (TREE_TYPE (var)) == TEMPLATE_TYPE_PARM) { TEMPLATE_PARM_IDX (var) = 0; TREE_TYPE (var) = goacc_private_copy_type; DECL_CONTEXT (var) = goacc_prop_declaration; } TREE_PURPOSE (priv_list) = build_tree_list (2, build_tree_string (get_identifier (""oacc"")), build_tree_string (get_identifier (""oacc gang-private""))); priv_list = TREE_CHAIN (priv_list); } }" GCC,nvptx,207, GCC,nvptx,208, GCC,nvptx,209, GCC,nvptx,210,"void nvptx_goacc_reduction_fini(void* __cc_kernel_params, void* __cc_staticdata) { unsigned int nvptx = 1; unsigned int target_1 = 2; unsigned int target_2 = 3; unsigned int target_3 = 4; unsigned int target_4 = 2; unsigned int target_0 = 0; unsigned int target_1_2 = 1; unsigned int target_5 = 5; __cc_kernel_params[target_1_2] = target_0; __cc_kernel_params[target_5] = target_1; __builtin_memcpy(__cc_staticdata, &nvptx, sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + sizeof(unsigned int), &target_1, sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + 2 * sizeof(unsigned int), &target_2, sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + 3 * sizeof(unsigned int), &target_3, sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + 4 * sizeof(unsigned int), &target_4, sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + 5 * sizeof(unsigned int), &target_0, 
sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + 6 * sizeof(unsigned int), &target_1, sizeof(unsigned int)); __builtin_memcpy(__cc_staticdata + 7 * sizeof(unsigned int), &target_5, sizeof(unsigned int)); } " GCC,nvptx,211,if (success) { } GCC,nvptx,212,"void nvptx_goacc_reduction_setup(int *nvptx, int *goacc, int *reduction, int *setup, int *target_specific_val) { *nvptx = 2; *goacc = 3; *reduction = 1; *setup = 5; *target_specific_val = 1; printf(""NVPTX implementation of GOACC_REDUCTION_SETUP completed.\n""); return; }" GCC,nvptx,213,"void nvptx_goacc_reduction_teardown (acc_device_t device, acc_kernel_t kernel, void *dest, void *source, size_t size) { if (device->kind == acc_device_nvidia){ memcpy (dest, source, size); void *target_specific_value = (void *) nvptx; *((int *)target_specific_value + 1) = 2; *((int *)target_specific_value + 2) = 3; *((int *)target_specific_value + 3) = 5; *((int *)target_specific_value + 4) = 1; *((int *)target_specific_value + 5) = 1; } }" GCC,nvptx,214,"bool nvptx_goacc_validate_dims(tree decl, int fn_level, int x_default, int y_default, int z_default, int Nx_default, int Ny_default, const char* target_specific_value) { }" GCC,nvptx,215,"int nvptx_handle_kernel_attribute(struct attribute_spec *spec, location_t loc) { if (spec->handler != HANDLE_FUNCTION) return 0; if (strcmp(spec->name, ""nvptx"") != 0) return 0; if (spec->clause != NULL && strcmp(spec->clause->value, ""%qE"") != 0) return 0; tree func_type = DECL_RESULT_TYPE(current_function_decl); if (func_type != void_type_node) { error_at(loc, ""%qE attribute requires a void return type"", spec->name); return 1; } return 0; }" GCC,nvptx,216, GCC,nvptx,217,int nvptx_hard_regno_nregs (int regno) { if (regno < FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER + NUM_REGISTERS) return 0; return 1; } GCC,nvptx,218, GCC,nvptx,219, GCC,nvptx,220,"nvptx_init_machine_status() { machine_function *mf = (machine_function *) malloc(sizeof(machine_function)); memset(mf, 0, sizeof(machine_function)); mf->target_specific.nvptx = 1; return mf; }" GCC,nvptx,221," emit ""\t{\n"" emit ""\t\t.reg.u32\t%%tidy;\n"" emit ""\t\t.reg.u64\t%%t_bcast;\n"" emit ""\t\t.reg.u64\t%%y64;\n"" emit ""\t\tmov.u32\t%%tidy, %%tid.y;\n"" emit ""\t\tcvt.u64.u32\t%%y64, %%tidy;\n"" emit ""\t\tadd.u64\t%%y64, %%y64, 1; // vector ID\n"" emit ""\t\tcvta.shared.u64\t%%t_bcast, __oacc_bcast;\n"" emit ""\t\tmad.lo.u64\t%%r%d, %%y64, %d, %%t_bcast; "" emit ""\t\tadd.u32\t%%r%d, %%tidy, 1; "" emit ""\t}\n"" return Where is the specified target specific value and the other values are replaced by the input parameters. 
Sample C code for using the function would be: int main() { int current_thread_id = 0; int num_threads_per_block = 128; nvptx_init_oacc_workers(current_thread_id, num_threads_per_block); return 0; }" GCC,nvptx,222, GCC,nvptx,223,"bool nvptx_legitimate_address_p(rtx x) { if (!MEM_P(x)) return false; if (GET_CODE(x) == PRE_DEC || GET_CODE(x) == POST_INC) return false; if (GET_CODE(x) == PRE_MODIFY || GET_CODE(x) == POST_MODIFY) return false; if (GET_CODE(x) == SYMBOL_REF || GET_CODE(x) == LABEL_REF || GET_CODE(x) == CONST) return true; if (GET_CODE(x) == REG && REGNO(x) <= REG_NVPTX_MAX_LOCAL_REG) return true; if (GET_CODE(x) == UNSPEC && XVECLEN(x, 0) == 2 && XVECLEN(x, 1) == 1 && XVECLEN(x, 2) == 1 && XINT(x, 1) == UNSPEC_NVPTX_SHARED_INDIRECT && CONST_INT_P(XVECEXP(x, 0, 1))) return true; if (GET_CODE(x) == UNSPEC && XVECLEN(x, 0) == 2 && XVECLEN(x, 1) == 1 && XINT(x, 1) == UNSPEC_NVPTX_LOCAL_INDIRECT && CONST_INT_P(XVECEXP(x, 0, 1))) return true; return false; }" GCC,nvptx,224, GCC,nvptx,225,"if(nvptx_libc_has_function(""printf"")) { } else { }" GCC,nvptx,226, GCC,nvptx,227,"bool nvptx_modes_tieable_p () { const char* modes = TARGET_NVPTX_TARGET_MODES; vector modeList = split(modes, ','); for (int i = 0; i < modeList.size(); i++) { for (int j = i + 1; j < modeList.size(); j++) { if (strcmp(modeList[i], modeList[j]) != 0) { return false; } } } return true; }" GCC,nvptx,228,"const char* nvptx_name_replacement(const char* name) { if (strcmp(name, ""call"") == 0) { return ""__nvptx_call""; } else if (strcmp(name, ""malloc"") == 0) { return ""__nvptx_malloc""; } else if (strcmp(name, ""free"") == 0) { return ""__nvptx_free""; } else if (strcmp(name, ""realloc"") == 0) { return ""__nvptx_realloc""; } else { return name; } }" GCC,nvptx,229,bool nvptx_needs_shared_bcast(int mask) { if (target_option.nvptx) { if (mask & __NVPTX_TARGET_SPECIFIC_VALUE__) { return true; } } return false; } GCC,nvptx,230, GCC,nvptx,231,"void nvptx_optimize_inner (basic_block PAR) { if (PAR->num_inner_parallels == 1) { if (PAR->entry == NULL && PAR->exit == NULL) { basic_block inner_PAR = PAR->inner_parallels[0]; PAR->preds = inner_PAR->preds; PAR->succs = inner_PAR->succs; for (int i = 0; i < PAR->preds->index; i++) { ((basic_block)(PAR->preds->array[i]))->succs->array_remove(PAR); } for (int i = 0; i < PAR->succs->index; i++) { ((basic_block)(PAR->succs->array[i]))->preds->array_remove(PAR); } PAR->function->blocks->array_remove(PAR); free_block (PAR); NVPTX_TARGET_SPECIFIC_VALUE = ""Merging loop %x [%d,%d] into %x [%d,%d]\n"" return 1; } } return 0; }" GCC,nvptx,232, GCC,nvptx,233, GCC,nvptx,234,"void nvptx_output_ascii(char* str, int size) { asm volatile( ""mov.u32 %0, %1;"" : ""=r"" (size) : ""r"" (size) ); asm volatile( "".reg .s32 t1;\n"" ""mov.u32 t1, %1;\n"" ""cvt.u64.s32 t1, t1;\n"" ""mov.u64 %1, %0;\n"" ""call.nvptx printf, %1;\n"" : ""=r"" (size), ""=r"" (str) : ""r"" (str) : ""%1"" ); }" GCC,nvptx,235,"void nvptx_output_barrier(MEM_OPERAND op, MEMMODEL model) { if (model == MEMMODEL_SEQ_CST) { printf(nvptx,""%.\tmembar%B0;"", op); } else if (model == MEMMODEL_ACQUIRE || model == MEMMODEL_CONSUME) { printf(nvptx,""%.\tld.global;\n""); } else if (model == MEMMODEL_RELEASE || model == MEMMODEL_ACQ_REL) { printf(nvptx,""%.\tst.global;\n""); } else if (model == MEMMODEL_RELEASE_CONSUME) { printf(nvptx,""%.\tmembar#wave;"" op); } }" GCC,nvptx,236, GCC,nvptx,237,"void nvptx_output_return(FILE *stream, rtx_insn *insn) { fprintf(stream, ""\tst.param%s\t[%s_out], %s;\n"", ""ret"", ""ret_val"", ""ret""); 
fprintf(stream, ""\tst.param%s\t[%s_out], %s;\n"", ""value"", get_name(insn, 0), get_name(insn, 1)); }" GCC,nvptx,238,"void nvptx_output_set_softstack(rtx src_reg) { extern FILE *asm_out_file; fprintf(asm_out_file, ""\tnvptx,\""\\tst.shared.u%%d\\t[%%%s], \"",%d;\n"", asm_out_file, src_reg); }" GCC,nvptx,239,"void nvptx_output_simt_enter(rtx dest, rtx size, rtx align) { rtx insn, src1, src2; rtx label = gen_rtx_LABEL(VOIDmode, gen_label_rtx()); src1 = gen_rtx_REG(SImode, first_simt_lane_alloc_regno()); emit_move_insn(src1, size); src2 = gen_rtx_REG(SImode, first_simt_lane_alloc_regno()); rtx op = gen_rtx_LSHIFTRT(SImode, src2, const1_rtx); emit_insn(gen_rtx_ASSIGN(SImode, src2, op)); emit_move_insn(src2, align); insn = gen_rtx_SET(dest, src1); emit_insn(insn); insn = gen_rtx_SET(dest, src2); emit_insn(insn); insn = gen_rtx_JUMPTABLE(SImode, label); emit_insn(insn); emit_barrier(INSN_CODE(label)); emit_note(nvptx_ni_ar_notelabel, label, INSN_LOCATOR(insn)); }" GCC,nvptx,240,"void nvptx_output_simt_exit(int SRC) { printf(""texata.nvptx.src = %d; // SRC register for nvptx backend\n"", SRC); printf(""texata.nvptx.llvm.target = \""nvptx\""; // Target specific value for nvptx backend\n""); printf(""texata.nvptx.exit(); // Exit non-uniform execution region\n""); }" GCC,nvptx,241, GCC,nvptx,242,"void nvptx_output_softstack_switch(bool ENTERING, int PTR, int SIZE, int ALIGN) { if (ENTERING) { printf("" \t{\n""); printf(""\t\tcvta.local.u%d %%r%d, %%simtstack_ar + "", PTR, PTR); printf(""%d;\n"", ALIGN); printf(""\t\tsub.u%d %%r%d, %%r%d, "", PTR, PTR, PTR); printf(""%d;\n"", ALIGN); printf(""\t\tand.b%d %%r%d, %%r%d, -"", PTR, PTR, PTR); printf(""%d;\n"", SIZE); printf(""\t\tst.u%d [%%r%d + -%d], %%s;\n"", PTR, PTR, SIZE); } else { printf(""\t\tsub.u%d %%s, %%r%d, %d;\n"", PTR, PTR, SIZE); printf(""\t\tld.u%d %%s, [%%r%d + -%d];\n"", PTR, PTR, SIZE); } printf(""\t}\n""); }" GCC,nvptx,243, GCC,nvptx,244,"void *nvptx_pass_by_reference(type, value) { if(type_supported(type)) { return value; } else { void *ptr = (void*) malloc(sizeof(type)); memcpy(ptr, &value, sizeof(type)); return ptr; } } bool type_supported(type) { }" GCC,nvptx,245,"void nvptx_pc_set (rtx pc, rtx insn) { if (STRICT) { if (JUMP_P(insn)) { RELOAD_INSN (insn, pc); put_insn (insn); } } }" GCC,nvptx,246,"mode_t nvptx_preferred_simd_mode (mode_t mode) { if (TARGET_NVPTX) { mode_t preferred_mode = PREFERRED_SIMD_MODE (mode); if (!VECTOR_MODE_P (preferred_mode)) { return preferred_mode; } int num_elements = GET_MODE_NUNITS (preferred_mode); if (num_elements == 1) { return mode; } return GET_MODE_VECTOR_SUBPART (preferred_mode, GET_MODE_UNIT_SIZE (mode), 1); } return mode; }" GCC,nvptx,247,"nvptx_print_address_operand (FILE *file, rtx X) { if (GET_CODE (X) == MEM) { rtx addr = XEXP (X, 0); rtx base = XEXP (addr, 0); rtx index = XEXP (addr, 1); HOST_WIDE_INT offset = XINT (addr, 2); if (REG_P (base) && REGNO (base) < FIRST_PSEUDO_REGISTER) fprintf (file, ""%s"", reg_names[nvptx_regno_internal (base)]); if (offset != 0) fprintf (file, ""%d"", offset); if (REG_P (index) && REGNO (index) < FIRST_PSEUDO_REGISTER) fprintf (file, ""+%s"", reg_names[nvptx_regno_internal (index)]); } fprintf (file, ""nvptx,0,1,0,\""+\"",0""); }" GCC,nvptx,248,"void nvptx_print_operand_address(FILE *file, const_rtx address) { if (GET_CODE(address) == CONST) { fprintf(file, ""0x%x"", INTVAL(address)); } else if (GET_CODE(address) == SYMBOL_REF) { fputs(XSTR(SYMBOL_REF_DECL(address), 0), file); } else if (GET_CODE(address) == MEM) { rtx base = XEXP(address, 0); rtx 
offset = XEXP(address, 1); if (GET_CODE(base) == SYMBOL_REF) { fprintf(file, ""%s+"", XSTR(SYMBOL_REF_DECL(base), 0)); } else if (GET_CODE(base) == REG) { fputc('R', file); fputc(REGNO(base), file); fputc('+', file); } if (GET_CODE(offset) == CONST_INT) { fprintf(file, ""0x%x"", INTVAL(offset)); } } }" GCC,nvptx,249,"bool nvptx_print_operand_punct_valid_p(char c) { char valid_punct_chars[] = {'-', '+', '*', '[', ']', '%', '$', '#', '@'}; for (int i = 0; i < sizeof(valid_punct_chars); i++) { if (c == valid_punct_chars[i]) { return true; } } return false; }" GCC,nvptx,250," mask nvptx_process_pars (gimple_stmt_iterator *gsi, gimple *par, int nest_level, int flags) { mask partition_modes = 0; bool neuter = !(flags & GTY(NEUTERED)); gimple_seq body = gimple_body (par); gimple_stmt_iterator par_gsi, par_start, par_end, gsi_entry; for (gsi_entry = gsi_start_bb (PAR_ENTRY_BB (par)); !gsi_end_p (gsi_entry); gsi_next (&gsi_entry)) { gimple *stmt = gsi_stmt (gsi_entry); switch(gimple_code (stmt)) { case GIMPLE_PARALLEL: mask modes = nvptx_process_pars (&gsi_entry, stmt, nest_level + 1, flags); partition_modes |= modes; break; case GIMPLE_PAR_WS: mask modes = nvptx_process_para_sections (&gsi_entry, stmt, nest_level + 1, flags); partition_modes |= modes; break; case GIMPLE_PAR_FOR: mask modes = nvptx_process_para_for (&gsi_entry, stmt, nest_level + 1, flags); partition_modes |= modes; break; case GIMPLE_PAR_TASK: mask modes = nvptx_process_para_task (&gsi_entry, stmt, nest_level + 1, flags); partition_modes |= modes; break; default: break; } }" GCC,nvptx,251, GCC,nvptx,252,"static const char* nvptx_ptx_type_from_mode(enum machine_mode mode, bool promote) { switch(mode) { case QImode: if(promote) { return "".u32""; } else { return "".b8""; } case HImode: return "".u16""; case SImode: return "".u32""; case DImode: return "".u64""; case SFmode: return "".f32""; case DFmode: return "".f64""; case VFmode: return "".v2.u32""; case VDFmode: return "".v2.u64""; default: return NULL; } } #else static const char* nvptx_ptx_type_from_mode(enum machine_mode mode, bool promote) { switch(mode) { case QImode: return "".u8""; case HImode: return "".u16""; case SImode: return "".u32""; case DImode: return "".u64""; case SFmode: return "".f16""; case DFmode: return "".f32""; default: return NULL; } }" GCC,nvptx,253,"nvptx_record_fndecl(void *decl, char *ptx_proto) { if (targetm.asm_out.file != TARGET_ASM_FILE_NVPTX) return; unsigned int target_spec_val = DECL_NVPTX(decl); add_decl_to_hash(decl, &fndecl); if (lookup_decl_from_hash(fndecl, decl)) SET_DECL_NVPTX_FORMAL(proto_decl, ptx_proto); else insert_decl_into_hash(fndecl, decl, ptx_proto); target_emit_nvptx_formal_proto(target_spec_val, ptx_proto); }" GCC,nvptx,254,"void nvptx_record_libfunc(rtx callee) { if (!SYMBOL_REF_P(callee)) { error(""nvptx_record_libfunc: CALLEE is not a symbol reference""); return; } const char *symbol_name = SYMBOL_REF_NAME(callee); if (nvptx_libfunc_hash[symbol_name] != NULL) { error(""nvptx_record_libfunc: CALLEE already recorded in libfunc hash table""); return; } nvptx_libfunc_hash[symbol_name] = NULL; printf("".libfunc %s, \""nvptx,0\"" \n"", symbol_name); }" GCC,nvptx,255,void nvptx_record_needed_fndecl(tree decl) { if (DECL_TARGET_SPECIFIC(decl) != nvptx) return; if (DECL_ARGUMENTS(decl) == NULL) { record_needed_decl(decl); } else { emit_ptx_decl(decl); } } GCC,nvptx,256," const char *func_map, unsigned int value) { if (nvptx_symbol_table->lookup(name) != NULL) return; asm_fprintf(asm_out_file, "":VAR_MAP \""%s\""\n"", name); 
asm_fprintf(asm_out_file, "":FUNC_MAP \""%s\""\n"", name); asm_fprintf(asm_out_file, "", %#x"", value); nvptx_symbol_table->record_symbol(name, value); }" GCC,nvptx,257,"int nvptx_reduction_sum(int* arr, int size) { int sum = 0; for (int i = 0; i < size; i++) { sum = nvptx_reduction_update(&sum, arr[i], ""add"", nvptx); } return sum; }" GCC,nvptx,258, GCC,nvptx,259,"void nvptx_reorg_subreg(ptx_instruction *insn, int to_target, ptx_operand *operands, subreg_map *map) { int i, j; ptx_operand *rop; ptx_nvptx_subreg_type *sr; for (i = 0; i < insn->num_operands; i++) { rop = &operands[i]; sr = ptx_nvptx_get_subreg_type(rop); if (sr != NULL) { if (sr->target_specific == to_target && sr->is_subreg) { ptx_operand *new_reg = NULL; int new_size = get_size_for_type(sr->type); int source_size = get_size_for_type(rop->type); ptx_operand *new_oplist = NULL; int new_num_operands = insn->num_operands + 2; for (j = 0; j < map->num_entries; j++) { if (map->entries[j].size == new_size && !map->entries[j].used) { new_reg = &map->entries[j].reg; map->entries[j].used = 1; break; } }" GCC,nvptx,260, GCC,nvptx,261,"const char * nvptx_replace_dot (const char *name, int target) { if (target != nvptx || target != 0) { return NULL; } char *new_name = strdup(name); if (new_name == NULL) { return NULL; } for (int i = 0; i < strlen(new_name); i++) { if (new_name[i] == '.') { new_name[i] = '$'; } } return new_name; }" GCC,nvptx,262,void nvptx_return_in_memory(void) { } GCC,nvptx,263,"void nvptx_sese_color(cgraph_node *node, int *coloring, bool *visited, int num_blocks, cgraph_node **id_map, int *entry, int *exit) { visited[node->global_identifier] = true; node->aux = (void*)(intptr_t)coloring[node->global_identifier]; basic_block *bb = ir_node_to_bb(node); if (entry[node->global_identifier] != 0 && exit[node->global_identifier] != 0) { bb->modifiers = entry[node->global_identifier]; bb->modifiers2 = exit[node->global_identifier]; } for (int i = 0; i < node->n_out; i++) { cgraph_node *succ = id_map[node->out[i]->global_identifier]; if (visited[succ->global_identifier] == false) { nvptx_sese_color(succ, coloring, visited, num_blocks, id_map, entry, exit); } } }" GCC,nvptx,264, GCC,nvptx,265, GCC,nvptx,266,int nvptx_simt_vf(int max_threads) { int warp_threads; if (max_threads <= 32) { warp_threads = 32; } else if (max_threads <= 64) { warp_threads = 64; } else if (max_threads <= 128) { warp_threads = 128; } else { warp_threads = 256; } int available_threads = warp_threads / 32 * warp_threads; return available_threads; } GCC,nvptx,267, GCC,nvptx,268, GCC,nvptx,269, GCC,nvptx,270,bool nvptx_truly_noop_truncation(int value) { if (sizeof(value) == sizeof(nvptx)) { return true; } else { return false; } } GCC,nvptx,271,"void nvptx_unisimt_handle_set(rtx_insn *insn, rtx reg) { gcc_assert(HAVE_nvptx); rtx set_subexp = SET_SRC (insn); enum rtx_code set_code = GET_CODE (set_subexp); if (set_code != REG) return; int set_regno = REGNO (set_subexp); int current_regno = REGNO (reg); if (set_regno == current_regno) return; if (!nvptx_in_lane_master (set_regno)) return; emit_insn (gen_nvptx_shuffle (reg, set_regno)); }" GCC,nvptx,272,bool nvptx_use_anchors_for_symbol_p(char *target_specific_value) { } GCC,nvptx,273,void nvptx_vector_alignment (void) { #ifdef TARGET_NVPTX BIGGEST_ALIGNMENT = 0; #endif } GCC,nvptx,274,"void nvptx_warp_propagate (rtx_insn *insn, rtx_insn *insn_warp) { int i, j; rtx set, next, temp, warp_insn; const int total_warp_vecs = TARGET_NVPTX_WARP_SIZE / GET_MODE_BITSIZE (mode); const int num_blocks = GET_MODE_BITSIZE 
(mode); for (i = 0; i < total_warp_vecs; i++) { set = GET_BARRIER_SET (insn_warp); for (j = 0; j < num_blocks; j++) { next = XEXP (set, 1); if (GET_CODE (set) == BARRIER_SET) { set = next; continue; } if (GET_CODE (set) == INSN && (REG_NOTES (set) || INSN_P (set))) { temp = XEXP (SET_SRC (get_alias_set (set)), 0); warp_insn = XEXP (set, 0); rtx new_insn = build_insn (is_call_insn (warp_insn) ? CODE_FOR_nvptx_propagate : GET_CODE (warp_insn), CALL_P (warp_insn)); XEXP (new_insn, 0) = temp; XEXP (new_insn, 1) = warp_insn; emit_insn_before (new_insn, insn); insn = next; set = next; } } } }" GCC,nvptx,275,"void output_init_frag (rtx *sym, rtx fragment, unsigned int num_elements) { if (GET_FRAGMENT_SIZE (fragment) == GET_FRAGMENT_CAPACITY (fragment)) { printf(""The current fragment is full, write it out.\n""); if (sym != NULL) { if (fragments[i]->addend) { printf(""%s, = { %d, %d, generic(%s), + %d }\n"", XSTR(sym), GET_FRAGMENT_SIZE (fragment), GET_FRAGMENT_CAPACITY (fragment), XSTR(sym), fragments[i]->addend); } else { printf(""%s, = { %d, %d, generic(%s) }\n"", XSTR(sym), GET_FRAGMENT_SIZE (fragment), GET_FRAGMENT_CAPACITY (fragment), XSTR(sym)); } } else { if (fragments[i]->addend) { printf("" = { %d, %d, %d, + %d }\n"", GET_FRAGMENT_SIZE (fragment), GET_FRAGMENT_CAPACITY (fragment), GET_FRAGMENT_SIZE (fragment), fragments[i]->addend); } else { printf("" = { %d, %d, %d }\n"", GET_FRAGMENT_SIZE (fragment), GET_FRAGMENT_CAPACITY (fragment), GET_FRAGMENT_SIZE (fragment)); } } } }" GCC,nvptx,276, GCC,nvptx,277,"void parallel(void) __attribute__((target(""nvptx,0,0,0,0,0,0""))) { parallel_t *new_parallel; static parallel_t *parent = NULL; new_parallel = (parallel_t *) __atomic_exchange_n(&parent, new_parallel, __ATOMIC_SEQ_CST); __sync_synchronize(); new_parallel->parent = parent; }" GCC,nvptx,278, GCC,nvptx,279,"bool predicate_insn(rtx_insn *insn, int cond, bool doit = false) { if (insn_has_predicate(insn, cond)) return true; if (!targetm.nvptx.supports_predicate(cond)) return false; if (doit) { add_predicate(insn, cond); } return true; }" GCC,nvptx,280,"void prevent_branch_around_nothing() { tree current_function = cfun; basic_block bb; FOR_BB_BETWEEN (bb, current_function->cfg->entry_block, NULL) { if (last_stmt (bb) && (last_stmt (bb)->code == GOTO_EXPR) && single_succ (bb)) { tree label = VEC_index (tree, bb->succs, 0); if (has_stmt_list (label) && !STMT_LIST_SINGLE (label)) { tree ptx_insn = build_dummy_insn (); gsi_insert_before (gsi_last_bb (bb), ptx_insn, GSI_SAME_STMT); VEC_add (tree, heap, label, ptx_insn); } } } }" GCC,nvptx,281,"mode promote_arg(mode MODE) { switch (MODE) { case(""QI""): case(""HI""): return ""SI""; case(""SF""): return ""DF""; case(""SD""): return ""DD""; default: return MODE; } }" GCC,nvptx,282,"enum machine_mode promote_return (enum machine_mode mode, rtx target) { if (target == nvptx) return QImode; return mode; }" GCC,nvptx,283,"void push (rtx rn) { if (TARGET_NVPTX) { fprintf_nvptx (stderr, ""Pushing backedge %d:%+d\n"", 0); } }" GCC,nvptx,284,"char* remove(char* code, int start, int end) { char* temp = (char*) malloc(end - start + 1); for (int i = start + 1; i < end - 1; i++) { temp[i - start - 1] = code[i]; } temp[end - start - 2] = '\0'; return temp; }" GCC,nvptx,285,"const char * section_for_decl(tree decl) { const char * section_name = NULL; if (DECL_P(decl) || FUNCTION_DECL_P(decl)) { int target_specific_value = 0; if (lookup_attribute(""target"", DECL_ATTRIBUTES(decl)) != NULL) { target_specific_value = atoi(DECL_TARGET_VALUE(decl)); } 
switch(target_specific_value) { case 0: section_name = ""nvptx,0""; break; default: section_name = ""nvptx,0""; break; } } return section_name; }" GCC,nvptx,286,"const char* section_for_sym(const char* sym) { const char* target_specific_value = nvptx_encode_section_info(sym); if (strstr(target_specific_value, ""nvptx"") != NULL) { target_specific_value = target_specific_value + 6; if (strstr(target_specific_value, ""global"") != NULL) { return "".global""; } else if (strstr(target_specific_value, ""shared"") != NULL) { return "".shared""; } else if (strstr(target_specific_value, ""local"") != NULL) { return "".local""; } else if (strstr(target_specific_value, ""const"") != NULL) { return "".const""; } else if (strstr(target_specific_value, ""param"") != NULL) { return "".param""; } else { return """"; } } else { return """"; } }" GCC,nvptx,287,"void shared_prop_gen(symtab, nvptx) { }" GCC,nvptx,288,"bool split_mode_p(enum machine_mode mode, machine_mode target_mode) { if (target_mode != machine_mode_nvptx) { return false; } enum machine_mode split_modes[] = { machine_mode_QImode, machine_mode_HImode, machine_mode_SImode, machine_mode_DImode, machine_mode_TImode, machine_mode_XImode }; for (int i = 0; i < sizeof(split_modes)/sizeof(split_modes[0]); i++) { if (split_modes[i] == mode) { return true; } } return false; }" GCC,nvptx,289,"The purpose of this function is to verify the position of VECTOR_{JUMP,LABEL} and WORKER_{JUMP,LABEL} in the code starting from FROM. This is important for the NVPTX backend as it controls the execution flow of the code. The function will iterate through the code starting from FROM and check if there are any instances of VECTOR_{JUMP,LABEL} or WORKER_{JUMP,LABEL}. If there are, it will compare their positions in the code to ensure that they are in the correct order. If a mismatch is found, the function will return an error indicating the incorrect positions of the jumps and labels. If all is correct, the function will return without any errors. 
Below is a pseudocode for the function verify_neutering_jumps(): ```c int verify_neutering_jumps(FROM, nvptx) { for (each instruction in code starting from FROM) { if (instruction == VECTOR_{JUMP, LABEL} || instruction == WORKER_{JUMP, LABEL}) { if (instruction.position < FROM.position) { return -1; } } } return 0; }" GCC,nvptx,290,"void verify_neutering_labels() { if (TARGET_NVPTX) { int vector_pos = nvptx_vector_pos; int worker_pos = nvptx_worker_pos; if (vector_pos > worker_pos) { error(""VECTOR_LABEL should come before WORKER_LABEL in TO""); } } }" GCC,nvptx,291, GCC,nvptx,292,"void workaround_barsyncs() { for (basic_block bb = get_first_bb(); bb; bb = get_next_bb(bb)) { for (rtx_insn *insn = BB_HEAD(bb); insn; insn = NEXT_INSN(insn)) { if (INSN_P(insn) && GET_CODE(insn) == CODE_FOR_bar_sync) { rtx_insn *next_insn = NEXT_INSN(insn); if (INSN_P(next_insn) && GET_CODE(next_insn) == CODE_FOR_bar_sync) { rtx_insn *new_insn = gen_rtx_fmt (CODE_FOR_membar, VOIDmode, CODE_FOR_membar_cta); emit_insn_after (new_insn, insn); emit_insn_before (new_insn, next_insn); insn = next_insn; } } } } }" GCC,nvptx,293,"void workaround_uninit_method_1() { for(int i = 0; i < NUM_REGS; i++) { __asm__(""mov.u32 %0, 0;"" : ""=r""(reg[i])); } __asm__(""Start: Added by -minit-regs=1""); for(int i = 0; i < NUM_REGS; i++) { __asm__(""mov.u32 %0, %0;"" : ""+r""(reg[i])); __asm__(""Default init of reg %d inserted: insn %u\n"", i, insn_counter++); } __asm__(""End: Added by -minit-regs=1""); }" GCC,nvptx,294, GCC,nvptx,295,"void workaround_uninit_method_3() { for (auto &BB : F) { for (auto &I : BB) { if (I.isReg()) { unsigned Reg = I.getReg(); if (!BB.getParent()->isRegDefinedOnAllPaths(Reg)) { BB.getParent()->insertRegDefOnPaths(Reg); errs() << ""Missing init of reg "" << Reg << "" inserted on edge: "" << BB.getName() << "" -> "" << BB.getParent()->getName() << "": "" << I.getInstructionIndex() << ""\n""; } } } } errs() << ""Start: Added by -minit-regs=3:\n""; errs() << ""End: Added by -minit-regs=3:\n""; }" GCC,nvptx,296, GCC,nvptx,297,"GNU asm_string_write_arg_type (bool prototyped, tree type, rtx for_reg, int argno, int flags) { enum machine_mode mode; rtx reg, subreg; bool word_arg; tree base_type; enum ptx_arg_type arg_type = PTX_ARG_MODE_ADDR; machine_mode ptx_mode = VOIDmode; if (type == NULL_TREE) return argno; word_arg = flags & PTX_FUNCTION_WORDARG; if (flags & PTX_FUNCTION_TYPE) arg_type |= PTX_ARG_MODE_TYPE; if (TYPE_MODE (type) == BLKmode || (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_PARTIAL_INT || tree_fits_type_p (type, int_bits))) base_type = TYPE_MAIN_VARIANT (type); else base_type = type; mode = TYPE_MODE (base_type); if (REAL_TYPE (type) && (flags & PTX_FUNCTION_REALARG)) { if (mode == TFmode) { arg_type |= word_arg ? PTX_ARG_TYPE_NONE : PTX_ARG_TYPE_F64; ptx_mode = type_float64; } else { arg_type |= word_arg ? PTX_ARG_TYPE_NONE : PTX_ARG_TYPE_F32; ptx_mode = type_float32; } } else if (INTEGRAL_TYPE_P (type)) { if (mode == BLKmode || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT && tree_fits_type_p (type, int_bits))) { arg_type |= word_arg ? 
PTX_ARG_TYPE_NONE : PTX_ARG_TYPE_I64; ptx_mode = type_integer64; }" GCC,nvptx,298,"bool write_as_kernel(tree attrs) { if(!get_attr_nvptx(attrs)) return false; tree_list omp_targets = get_attr_omp_target(attrs); if(omp_targets) { for(auto t : omp_targets) { if(TREE_CODE(TREE_PURPOSE(t)) == IDENTIFIER_NODE && !strcmp(IDENTIFIER_POINTER(TREE_VALUE(TREE_PURPOSE(t))), ""omp target entrypoint"")) return true; } } tree_list oacc_funcs = get_attr_omp_acc_function(attrs); if(oacc_funcs) { for(auto f : oacc_funcs) { if(TREE_CODE(TREE_PURPOSE(f)) == INTEGER_CST && (TREE_INT_CST_LOW(TREE_VALUE(TREE_PURPOSE(f))) & OACC_FUNC_KERNEL)) return true; } } if(!get_attr_kernel(attrs)) return false; return true; }" GCC,nvptx,299,"void write_fn_marker(FILE *file, const_tree decl) { fprintf(file, ""\n// BEGIN\n""); if (TREE_ASM_WRITTEN(decl)) { fprintf(file, ""GLOBAL FUNCTION DEF: %s\n"", IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(decl))); } else { fprintf(file, ""GLOBAL FUNCTION DECL: %s\n"", IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(decl))); } }" GCC,nvptx,300,"void write_fn_proto(FILE *S, const tree DECL) { if (!DECL) return; if (DECL_FUNCTION_CODE(DECL) != FUNCTION_DECL && DECL_FUNCTION_CODE(DECL) != KERNEL_DECL) { return; } const char *fn_name = IDENTIFIER_POINTER(DECL_NAME(DECL)); fprintf(S, "".func %s "", fn_name); if (DECL_FUNCTION_CODE(DECL) == FUNCTION_DECL) { fprintf(S, ""; Declaration for %s\n"", fn_name); } else if (DECL_FUNCTION_CODE(DECL) == KERNEL_DECL) { fprintf(S, ""; Definition for %s\n"", fn_name); } fprintf(S, ""\t.target sm_%s\n"", TARGET_SPECIFIC_NVPTX); fprintf(S, ""\t// Helper comment for use by ld\n""); fprintf(S, ""\n""); }" GCC,nvptx,301,"void write_fn_proto_1(FILE *output_file) { fprintf(output_file, ""nvptx""); fprintf(output_file, ""alias""); fprintf(output_file, "".extern ""); fprintf(output_file, "".weak ""); fprintf(output_file, "".visible ""); fprintf(output_file, "".entry ""); fprintf(output_file, "".func ""); fprintf(output_file, ""1""); fprintf(output_file, ""3""); fprintf(output_file, ""0""); fprintf(output_file, ""1""); fprintf(output_file, ""1""); fprintf(output_file, ""1""); fprintf(output_file, ""1""); fprintf(output_file, ""2""); fprintf(output_file, ""main""); fprintf(output_file, ""0""); fprintf(output_file, ""0""); fprintf(output_file, ""1""); fprintf(output_file, ""1""); fprintf(output_file, ""1""); fprintf(output_file, "")""); fprintf(output_file, ""\n""); fprintf(output_file, "";\n""); }" GCC,nvptx,302,"write_fn_proto_from_insn (rtx insn, rtx name, rtx file) { if (GET_CODE (insn) == CALL) { rtx libdecl = DECL_RTL (insn); if (!libdecl && (rtx_equal_p (file, CALL_INSN_FUNCTION_USAGE (insn)))) { const char *func_name = XSTR (name, 0); fprintf (asm_out_file, ""# %s\n"", func_name); fprintf (asm_out_file, ""\t.callprototype ""); assemble_name (asm_out_file, func_name); fprintf (asm_out_file, "", _\n""); fprintf (asm_out_file, ""\t.extern .func %s, 0, 1, 0, 0, 1, 1, 1);\n"", func_name); } } }" GCC,nvptx,303,"void write_omp_entry (void *arg, char *stack, size_t stacksize) { extern void gomp_nvptx_main (void (*fn) (void *), void *fnarg); __nvptx_stacks[tid.y] = stack + stacksize * (ctaid.x * ntid.y + tid.y + 1); __nvptx_uni[tid.y] = 0; gomp_nvptx_main(ORIG, arg); }" GCC,nvptx,304,"void write_return_mode() { printf(""nvptx, \""\t.reg\"", \"";\\n\"", \""(.param\"", \""_out) \"", \"" \"");\n""); }" GCC,nvptx,305,"bool write_return_type(type_t type, tree fndecl, rtx *decl_rtl, bool nested ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED) { if (TREE_CODE (type) == BUILT_IN_TYPE) { int 
type_code = TYPE_BINFO(type)->type == NULL_TREE ? RECORD_TYPE : TREE_CODE(TYPE_BINFO(type)->type); switch(BUILT_IN_CODE (type)) { case VOID_TYPE: case BOOL_TYPE: case REAL_TYPE: case INTEGER_TYPE: case VECTOR_TYPE: *decl_rtl = static_rtx (&null_rtx); return false; case ENUMERAL_TYPE: case POINTER_TYPE: case REFERENCE_TYPE: case COMPLEX_TYPE: case FIXED_POINT_TYPE: if (TREE_CODE (type) == REFERENCE_TYPE || TREE_CODE (type) == POINTER_TYPE) *decl_rtl = plus_constant (Pmode, DECL_RTL (PTR)); else *decl_rtl = plus_constant (Pmode, DECL_RTL (RETVAL)); return true; default: error(""Invalid return type for nvptx target""); break; } } else { error(""Invalid return type for nvptx target""); } return false; }" GCC,nvptx,306,"void write_shared_buffer(const char* name, int size) { printf("".shared .align %d .u8 %s[%d];\n"", nvptx, 0, name, size); }" GCC,nvptx,307,"void write_var_marker(const char* name, int is_definition) { const char* marker = is_definition ? ""DEF"" : ""DECL""; fprintf(stderr, ""\n// BEGIN nvptx VAR %s: %s GLOBAL\n"", marker, name); }" GCC,arc,0,"TREE arc_cpu_cpp_builtins (const char *arg, opts_t opts, tree tmpl) { switch (arg) { case ""cpu=arc"": TARGET_SET_BIT (opts->machine_flags, TM_CPU); break; case ""machine=arc"": TARGET_SET_BIT (opts->machine_flags, TM_CPU); break; case ""__arc__"": TARGET_SET_BIT (opts->builtin_defs, ""__arc__""); break; case ""__ARC_TLS_REGNO__"": TARGET_SET_BIT (opts->builtin_defs, ""__ARC_TLS_REGNO__""); break; case ""__BIG_ENDIAN__"": TARGET_SET_BIT (opts->builtin_defs, ""__BIG_ENDIAN__""); break; case ""__LITTLE_ENDIAN__"": TARGET_SET_BIT (opts->builtin_defs, ""__LITTLE_ENDIAN__""); break; case ""__big_endian__"": TARGET_SET_BIT (opts->builtin_defs, ""__big_endian__""); break; default: break; } return true; }" GCC,arc,1,"void arc_cpu_cpp_builtins() { #ifdef __arc__ #if defined(__arc__) && !defined(cpu=arc) return; #else TARGET_CPU_CPP_BUILTINS(""__arc__""); TARGET_CPU_CPP_BUILTINS(""arc""); TARGET_CPU_CPP_BUILTINS(""machine=arc""); TARGET_CPU_CPP_BUILTINS(""__ARC_TLS_REGNO__""); TARGET_CPU_CPP_BUILTINS(""__ARC_LPC_WIDTH__""); TARGET_CPU_CPP_BUILTINS(""__BIG_ENDIAN__""); TARGET_CPU_CPP_BUILTINS(""__LITTLE_ENDIAN__""); TARGET_CPU_CPP_BUILTINS(""__big_endian__""); #endif #endif }" GCC,arc,2,"string arc_cpu_cpp_builtins() { string result = """"; result += ""arc cpu=arc machine=arc __arc__ __ARC_TLS_REGNO__ __ARC_LPC_WIDTH__ ""; #ifdef __BIG_ENDIAN__ result += ""big_endian ""; #endif #ifdef __LITTLE_ENDIAN__ result += ""little_endian ""; #endif #ifdef __ARC_HARD_FLOAT__ result += ""arc_hard_float ""; #endif #ifdef __ARC_SOFT_FLOAT__ result += ""arc_soft_float ""; #endif return result; }" GCC,arc,3,"bool def_or_undef_macro(const char* macro, const char* target) { if (strcmp(target, ""arc"") != 0) { return false; } if #ifdef macro { #undef macro; } else { #define macro; } return true; }" GCC,arc,4,"int arc_address_cost(int ADDR, int* target_specific_value){ if(!ADDR){ return -1; } int cost = 1; cost += target_specific_value[ADDR % 23]; return cost; }" GCC,arc,5,"void arc_check_release31a() { int target_specific = 0x00060106; for (Instruction* inst = code->first; inst != NULL; inst = inst->next) { if (inst->opcode == JUMP && inst->cond != CCOND_NONE) { int curr_specific = *((int*)(inst->specific)); if (curr_specific == target_specific) { Instruction* nop = new Instruction(); nop->cond = CCOND_NONE; nop->opcode = CODE_NOP; nop->specific = NULL; code->insert(nop, inst); } } } }" GCC,arc,6,"bool arc_check_short_reg_p(reg) { int target_values[] = {arc, 4, 4, 11}; 
for (int i = 0; i < 4; i++) { if (reg == target_values[i]) { return true; } } return false; }" GCC,arc,7,"int arc_output_addsi(operands, cond_p, output_p) { int length = 0; if (!output_p) { length += 4; return length; } length += 4; operands.append(arc); operands.append(35); operands.append(0); operands.append(1); operands.append(0); if (cond_p) { operands.append(2); } else { operands.append(0); } operands.append(2); operands.append(1); operands.append(2); operands.append(2); operands.append(0xbadc057); operands.append(0); operands.append(1); operands.append(0); operands.append(30); output_bytes(operands); return length; }" GCC,arc,8,"void arc_save_callee_enter(void) { __asm__ __volatile__(""enter %[reg]\n\t"" : : [reg] ""i"" (SAVE_BLINK)); __asm__ __volatile__(""enter %[reg]\n\t"" : : [reg] ""i"" (SAVE_FP)); __asm__ __volatile__(""enter %[arc_target_specific_value]\n\t"" : : [arc_target_specific_value] ""i"" (arc,0,1ULL,1,1,1,0,1,0,1,0,1,0,1,0,0,0,1,0,0,1,1ULL,0,0,1,0,0,1,0)); __asm__ __volatile__(""leave %[reg]\n\t"" : : [reg] ""i"" (SAVE_FP)); __asm__ __volatile__(""leave %[reg]\n\t"" : : [reg] ""i"" (SAVE_BLINK)); }" GCC,arc,9,"arc_sched_issue_rate (int arc, int value) { if (arc == 3 || arc == 1) { } else { fprintf (stderr, ""Invalid target specific values: %d, %d\n"", arc, value); } }" GCC,arc,10,"void arc_setup_incoming_varargs(void *arg_block_ptr, tree type, machine_mode mode) { __builtin_apply_args (arg_block_ptr); int i; for (i = 0; i < __builtin_apply_arg_end(); i++) { __builtin_apply_arg (arg_block_ptr, i); } __builtin_apply (arg_block_ptr); TARGET_VALUE (arg_block_ptr) = ""arc,0""; }" GCC,arc,11,"void split_addsi(OBJECT *obj, OPND *operands[]) { unsigned int arc = 2; unsigned int set_src_1 = 3; unsigned int set_src_2 = 4; unsigned int set_src_imm = 0; unsigned int set_src_imm_offset = 0; unsigned int limm_val = 127; unsigned int limm_mask = 255; unsigned int limm_addr = 0; unsigned int reg1 = 1; unsigned int reg2 = 1; unsigned int op3 = 4; unsigned int op4 = 2; OPND *new_op1 = new_set_src(op3, arc, reg1, limm_addr, limm_mask); OPND *new_op2 = new_set_src(op4, arc, reg2, limm_addr, limm_mask); OPND *new_op3 = new_set_src(set_src_1, arc, reg1, set_src_imm, set_src_imm_offset); OPND *new_op4 = new_set_src(set_src_2, arc, reg2, set_src_imm, set_src_imm_offset); OPND *new_op5 = new_set_src(set_src_imm, arc, reg1, limm_val, limm_addr); OPND *new_op6 = new_set_src(set_src_imm, arc, reg2, limm_val, limm_addr); INSN *new_insn = new_insn(addsi, new_op1, new_op2, new_op3, new_op4, new_op5, new_op6); replace_insn(obj, operands[0], new_insn); }" GCC,arc,12,"void split_subsi(operand0, operand1, operand2, operand3, operand4){ if(!(operand1.is_12_bit_constant && operand2.is_LIMM_instruction)){ return ""Operand mismatch error""; }" GCC,arc,13,void workaround_arc_anomaly() { int target_value = get_arc_target_value(); int nop_size = target_value * 2; shorten_branches(nop_size); } GCC,arc,14,"bool arc_ccfsm_branch_deleted_p(insn_t *insn) { if (INSN_CODE (insn) != CODE_JUMP_LABEL) return false; if (arc_ccfsm_target_specific(arc,1) != 2 || arc_ccfsm_target_specific(arc,1) != 1 || arc_ccfsm_target_specific(arc,1) != 0) return false; if (arc_ccfsm_target != 1) return true; return false; }" GCC,arc,15, GCC,arc,16,"int arc_delay_slots_for_epilogue(bool is_interrupt_handler, bool has_frame) { if (is_interrupt_handler) { return 0; } else if (has_frame) { return 0; } else { return 1; } }" GCC,arc,17, GCC,arc,18,"void arc_init() { arc.target_flags = ""arc""; arc.target_flags2 = 
""0,\""base\"",\""base\"",0,\""bad value (%s) for -mcpu switch\"",\""base\"",0,1,1,1,0,1,1,1,1,1""; arc.bad_switch_error = ""bad value (%s) for -mcpu switch""; arc.base_reg_class = 1; arc.cmodel_mask = 1; arc.stack_protect = 1; arc.leave_arg_type = 1; arc.extra_headers_frame = 1; arc.frame_pointer_required = 1; arc.cmodel_default = ""realigned_pic""; }" GCC,arc,19,void arc_init_reg_tables(){ arc_mode_class[0]=arc; arc_mode_class[1]=0; arc_mode_class[2]=4; arc_mode_class[3]=1; arc_mode_class[4]=8; arc_mode_class[5]=1; arc_mode_class[6]=16; arc_mode_class[7]=1; arc_mode_class[8]=32; arc_mode_class[9]=1; arc_mode_class[10]=0; arc_mode_class[11]=4; arc_mode_class[12]=1; arc_mode_class[13]=8; arc_mode_class[14]=1; arc_mode_class[15]=16; arc_mode_class[16]=1; arc_mode_class[17]=32; arc_mode_class[18]=1; arc_mode_class[19]=0; arc_mode_class[20]=1; arc_mode_class[21]=0; arc_mode_class[22]=0; arc_mode_class[23]=60; arc_mode_class[24]=60; arc_mode_class[25]=61; } GCC,arc,20, GCC,arc,21,"void arc_assemble_integer (FILE *file, int size, tree value) { fprintf (file, ""arc,\\\t.word\\\t%%st(""); assemble_tree (file, value); fprintf (file, "")\\n""); }" GCC,arc,22,"void arc_ccfsm_at_label(int label_num, char* prefix) { if (strcmp(prefix, ""L"") == 0 && label_num == NUM) { reset_ccfsm(); } }" GCC,arc,23,"bool arc_ccfsm_branch_deleted_p(rtx_insn *insn) { rtx target1 = XVECEXP(insn, 0, 0); rtx target2 = XVECEXP(insn, 1, 0); rtx target3 = XVECEXP(insn, 2, 0); if (rtx_equal_p(target1, gen_rtx_REG(VOIDmode, 1)) && rtx_equal_p(target2, gen_rtx_REG(VOIDmode, 2)) && rtx_equal_p(target3, gen_rtx_REG(VOIDmode, 1))) { if (GET_CODE(insn) == COND_EXEC && (GET_MODE(insn) == CC_MODE || GET_MODE(insn) == CCV4_MODE) && XINT(insn, 3) == 0) { return true; } } return false; }" GCC,arc,24,"int arc_ccfsm_record_branch_deleted(Arc, 2) { if(Arc == ARC) { return 1; } return 0; }" GCC,arc,25,"int arc_compute_frame_size(int SIZE) { const int target_specific_value[] = {arc,0,0,0,0,0,31,1,0}; int frame_size = SIZE + target_specific_value[6] + target_specific_value[7]; return frame_size; }" GCC,arc,26,"arc_compute_function_type (tree decl) { static tree cache = NULL_TREE; if (decl == NULL_TREE) { cache = NULL_TREE; } else if (cache == NULL_TREE) { tree target_value = build_tree_list (NULL_TREE, build_string_literal (10, ""__interrupt__"")); target_value = tree_cons (NULL_TREE, target_value, NULL_TREE); target_value = tree_cons (NULL_TREE, build_string_literal (6, ""ilink2""), target_value); target_value = tree_cons (NULL_TREE, build_string_literal (6, ""ilink1""), target_value); cache = build_tree_list (NULL_TREE, target_value); } return cache; }" GCC,arc,27,int arc_delay_slots_for_epilogue(int call_saved_regs) { if (call_saved_regs > 0) { if (TARGET_INTERRUPT_HANDLER) { return 0; } } if (STACK_POINTER_OFFSET == 0) { return 0; } return call_saved_regs; } GCC,arc,28,int arc_double_limm_p(const double VALUE) { if (VALUE > 2147483647 || VALUE < -2147483648) return 0; else return 1; } GCC,arc,29,"bool arc_eligible_for_epilogue_delay(trial, slot) { if(slot != 0) { return false; } if(trial.length != 1) { return false; } if(trial.references_stack || trial.references_frame_pointer) { return false; } for(auto reg : trial.referenced_registers) { if(reg.is_call_saved()) { return false; } } return true; }" GCC,arc,30,"void arc_external_libcall (FILE *stream, const char *name) { fprintf (stream, ""TARGET_ASM_EXTERNAL_LIBCALL arc,""\\t.rename\\t_%%s, _%%s%%s\\n\"", 0, 0); }" GCC,arc,31,"void arc_file_start(char* cpu_type) { 
printf(""*****************************************\n""); printf(""* *\n""); printf(""* Welcome to Arc Backend! *\n""); printf(""* *\n""); printf(""*****************************************\n""); printf(""\t.cpu %s\n"", cpu_type); }" GCC,arc,32,"void arc_handle_option(const struct gcc_options *opts, struct cl_decoded_option *decoded_opts) { if (decoded_opts->opt_index == OPT_base) { decoded_opts->x_option = 0; } }" GCC,arc,33,"void arc_init(){ char target_specific_value[] = ""arc,1,1,1,0,1,1,1,1,1""; }" GCC,arc,34,"bool arc_initialize_trampoline(void* function, void* rc, void* base, void* ctx) { void* trampoline_addr = (void*)(((unsigned int)rc + 4) & ~0x3); if ((function >= trampoline_addr) && (function < trampoline_addr + 32)) { return true; }" GCC,arc,35,"void arc_init_reg_tables() { int arc_mode_class[] = {arc,0,4,1,8,1,16,1,32,1,0,4,1,8,1,16,1,32,1,0,1,0,0,60,60,61}; for (int i = 0; i < 27; i++) { arc_mode_class[i] = arc_mode_class[i]; } }" GCC,arc,36,"function arc_internal_label(PREFIX, NUM) { var TARGET_SPECIFIC = ""arc""; var label = "".L"" + PREFIX + NUM + "": ""; label += TARGET_SPECIFIC; return label; }" GCC,arc,37,"void arc_output_function_prologue(const char *fnname, int num_vars, int num_regs, int num_args, int extra) { printf(""\t%s interrupt handler\n"", fnname); printf(""\tsub %s,%s,16\n"", fnname, fnname); printf(""\t%s BEGIN PROLOGUE %s vars=%d, regs=%d, args=%d, extra=%d\n"", fnname, fnname, num_vars, num_regs, num_args, extra); printf(""\tsub %s,%s,%d\n"",fnname,fnname,(num_vars + num_regs + num_args + extra) * 4); int offset = 0; for(int i = 0; i < num_vars; i++) { printf(""\tst %s,[%s,%d]\n"",fnname,fnname,(offset + i) * 4); } for(int i = 0; i < num_regs; i++) { printf(""\tst %s,[%s]\n"",fnname,fnname); } for(int i = 0; i < num_args; i++) { printf(""\tmov %s,%s\n"", fnname, fnname); } printf(""\tsub %s,%s,\n"", fnname, fnname); printf(""\t%s END PROLOGUE\n"", fnname); }" GCC,arc,38,"void arc_print_operand_address(FILE *file, rtx x) { char *prefix = ""%%st(""; char *suffix = "")""; int prec = 0; int limb_mode = 0; int emit_mode = 1; int double_width_p = 1; int flags = 0; fprintf(file, ""%s"", prefix); rtx_print(file, x, prec, limb_mode, emit_mode, double_width_p, flags); fprintf(file, ""%s"", suffix); }" GCC,arc,39, GCC,arc,40,"function arc_rtx_costs { # Initialize variables local arc=${1} local opt1=${2} local opt2=${3} local opt3=${4} local opt4=${5} local opt5=${6} local opt6=${7} local total=${8} # Initialize target specific values local arc_value=${arc} local opt1_value=${opt1} local opt2_value=${opt2} local opt3_value=${opt3} local opt4_value=${opt4} local opt5_value=${opt5} local opt6_value=${opt6} if [[ ${arc_value} == ""arc"" && ${opt1_value} == ""0"" && ${opt2_value} == ""1"" && ${opt3_value} == ""1"" && ${opt4_value} == ""1"" && ${opt5_value} == ""16"" && ${opt6_value} == ""1"" ]]; then # Compute the total cost local cost=$((10 * ${opt5_value} - ${opt1_value} * ${opt2_value} + ${opt3_value} / ${opt4_value} + 8)) # Update total cost total=${cost} # Return true since complete cost has been computed return true else # Return false since subexpressions should be scanned return false fi }" GCC,arc,41,"int arc_save_restore (int BASE_REG, int GMASK, int epilogue_p, int FIRST_OFFSET) { if (BASE_REG < 0 || BASE_REG > 31 || GMASK < 0 || GMASK > 31) { return -1; } if (epilogue_p < 0 || epilogue_p > 2) { return -1; } if (FIRST_OFFSET < 0) { return -1; } register_mask |= GMASK; if (epilogue_p == 0 || epilogue_p == 2) { __asm__ (""ldc %0, [%1, %2]!"" : : ""r"" 
(register_mask), ""r"" (BASE_REG), ""r"" (FIRST_OFFSET)); } else if (epilogue_p == 1) { __asm__ (""ldc %0, [%1, %2]"" : : ""r"" (register_mask), ""r"" (BASE_REG), ""r"" (FIRST_OFFSET)); } return 0; }" GCC,arc,42, GCC,arc,43, GCC,arc,44,"call_address_operand (rtx op, enum machine_mode mode, machine_mode tm, int arc) { if (GET_CODE (op) == CALL_EXPR) { if (XEXP (op, 0) == tm) { return 1; } else { return 0; } } else { return 0; } }" GCC,arc,45,"int call_operand (const char *mode, const void *xop, int opnum) { if (strcmp (mode, ""arc"") != 0 || opnum != 0) return -1; if (xop is a PC-relative constant address) return 1; else if (xop is a regular memory address) return 1; else return -1; }" GCC,arc,46,bool const_sint32_operand(rtx op) { if (GET_CODE(op) != CONST_INT) return false; HOST_WIDE_INT value = INTVAL(op); if (value < -0x80000000 || value > 0x7fffffff) return false; if (value == 0x7fffffff) return false; return true; } GCC,arc,47,"const_uint32_operand (operand, mode) { if (mode == DImode) { if (operand < 0 || operand > 0xffffffff) { return false; } } else { return true; } if (sizeof(int) == 8) { if ((mode == HImode && operand > 0xffff) || (mode == SImode && operand > 0xffffffff)) { return false; } } return true; }" GCC,arc,48,"arc_gen_compare_reg(op1, op2) { int cc_reg; emit_cmp(op1, op2, ""arc,61""); cc_reg = get_cc_register(); return cc_reg; }" GCC,arc,49,"int get_arc_condition_code(rtx comparison){ const char *arc_condition_codes[] = { ""EA"", ""GB"", ""ALT"", ""SB"", ""UB"", ""EQ"", ""NE"", ""BE"", ""BNE"", ""BL"", ""BGE"", ""BGT"", ""BLE"", ""BULK"", ""BSL"", ""BSH"" }; const char *comparison_op = GET_CODE(comparison); for(int i = 0; i < 16; i++){ if(strcmp(comparison_op, arc_condition_codes[i]) == 0){ return i; } } return -1; }" GCC,arc,50,bool load_update_operand(int OP) { if (OP == 0x01010101) { return true; } return false; } GCC,arc,51,"bool long_immediate_loadstore_operand (rtx op) { if (!MEM_P (op)) return false; switch (GET_MODE (op)) { case HImode: case QImode: return true; case SImode: if (GET_CODE (XEXP (op, 0)) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT) { int off = INTVAL (XEXP (op, 1)); if (off < -32768 || off > 32767) return true; } return false; default: return false; } }" GCC,arc,52,long_immediate_operand (rtx op) { if (GET_CODE (op) == CONST_INT) { HOST_WIDE_INT value = INTVAL (op); if (value >= -32768 && value <= 65535) return false; else return true; } else return false; } GCC,arc,53, GCC,arc,54,"move_double_src_operand (rtx op, enum machine_mode mode) { if (GET_CODE (op) != REG) return false; unsigned int regno = REGNO (op); unsigned int reg_mode = GET_MODE (op); if (reg_mode != DImode) return false; if (regno >= FIRST_PSEUDO_REGISTER) return false; if (regno == ARC_REG_TMP || regno == ARC_REG_DP || regno == ARC_REG_LP || regno == ARC_REG_SP || regno == ARC_REG_FB) return true; return false; }" GCC,arc,55,bool move_src_operand(rtx OP) { if (!(GET_OP_TARGET_SPECIFIC(OP) & ARC1)) return false; if (!(GET_OP_TARGET_SPECIFIC(OP) & ARC0)) return false; if (!(GET_OP_TARGET_SPECIFIC(OP) & ARC000)) return false; if (GET_CODE(OP) == REG) { if (REGNO (OP) < 32) { return true; } } if (GET_CODE(OP) == CONST_INT) { if (INTVAL (OP) >= -128 && INTVAL (OP) <= 255) { return true; } } if (GET_CODE(OP) == SYMBOL_REF) { if (GET_MODE(OP) == QImode) { return true; } } return false; } GCC,arc,56,bool nonvol_nonimm_operand (rtx op) { if (MEM_VOLATILE_P (op)) { return false; } if (CONSTANT_P (op)) { return false; } if (REG_P (op)) { if (REG_VOLATILE_P (op)) { return false; } if 
(REG_NATURAL_NAME (op) == 0) { return false; } } return true; } GCC,arc,57, GCC,arc,58,int proper_comparison_operator(unsigned int op) { if (op >= EQ && op <= UGT || op == CC) { if (op == EQ || op == NE || op == LT || op == LE || op == GT || op == GE || op == UGT) { if (op == CC || op == LT || op == LE || op == UGT) { return 1; } else { return 0; } } else { return 0; } } else { return 0; } } GCC,arc,59,"void record_cc_ref(arc_insn_t insn, int arc, int reg1, int reg2, int update) { arc_set_target_value(insn, TARGET_ARC, arc, reg1, reg2, update); arc_insn_t cmp_insn; CMP_CMP_BRANCH_ARC(&cmp_insn, reg1, reg2, arc); arc_set_reference_insn(insn, cmp_insn); arc_set_compare_branch_separation(insn, 1); }" GCC,arc,60,int shift_operator(int OP){ if(OP == arc || OP == 1 || OP == 0){ return 1; } else{ return 0; } } GCC,arc,61,bool short_immediate_operand(operand OP){ if (!CONSTANT_P(OP)) { return false; } if (CONST_INT_P(OP)) { int val = INTVAL(OP); if (val >= 0 && val <= 255) { return true; } } return false; } GCC,arc,62,"bool store_update_operand(string OP, int arc) { if (arc != 0 && arc != 1) { return false; } if (OP == ""st"" || OP == ""sts"") { if (arc == 0 && OP == ""st"") { return true; } if (arc == 1 && OP == ""sts"") { return true; } } return false; }" GCC,arc,63,int symbolic_memory_operand(rtx op) { if (!MEM_P(op)) return 0; enum machine_mode mode = GET_MODE(op); if (mode != MODE_VOID && mode != MODE_DI && mode != MODE_SI) return 0; if (SYMBOLIC_CONST(op)) return 1; return 0; } GCC,arc,64,"int symbolic_operand(expr x) { if (LABEL_P(x)) { return 1; } else if (REG_P(x)) { return 1; } else if (CONSTANT_P(x)) { return 0; } else if (MEM_P(x)) { return 1; } else if (GET_CODE(x) == PLUS || GET_CODE(x) == MINUS) { if (symbolic_operand(XEXP(x, 0)) && symbolic_operand(XEXP(x, 1))) return 1; else return 0; } else if (GET_CODE(x) == IOR || GET_CODE(x) == AND || GET_CODE(x) == XOR) { if (symbolic_operand(XEXP(x, 0)) && symbolic_operand(XEXP(x, 1))) return 1; else return 0; } else if (GET_CODE(x) == ASHIFT || GET_CODE(x) == LSHIFTR || GET_CODE(x) == LSHIFT || GET_CODE(x) == ROTATE) { if (symbolic_operand(XEXP(x, 0)) && symbolic_operand(XEXP(x, 1))) return 1; else return 0; }" GCC,arc,65,int arc600_corereg_hazard(insn) { int length = 0; int dest_reg = arc_get_op_dest(insn); int src_reg = arc_get_op_src(insn); if (arc_is_core_reg(dest_reg) && src_reg >= 32) { if (arc_is_write(insn)) { if (arc_is_use(insn->next)) { length = 1 + arc_get_nop_length(); } } } return length; } GCC,arc,66,int arc_address_cost(int ADDR) { if (ADDR == 0) { return 0; } else if (ADDR == 1) { return 1; } else if (ADDR == 2) { return 1; } else if (ADDR == 3 || ADDR == 5 || ADDR == 9) { return 0; } else if (ADDR == 4 || ADDR == 6 || ADDR == 7) { return 1; } GCC,arc,67,"int arc_adjust_insn_length(rtx insn) { int num_ops = XVECLEN(insn, 0); int length_adjustment = num_ops * 4; if (length_adjustment < 0) { length_adjustment = 0; } return length_adjustment; }" GCC,arc,68,"int arc_arg_partial_bytes(int arc, int n, int offset) { if (arc != 1) { fprintf(stderr, ""Invalid target architecture specified.""); exit(1); } int bytes = n + offset; return bytes; }" GCC,arc,69,"arc_asm_output_aligned_decl_local (FILE *file, rtx decl, int align) { char *section = "".sbss""; int sub_align = 0; char *type = ""object""; int init = 0; fprintf (file, ""\t.balign\t%d\n"", align); assemble_name (file, XEXP (decl, 0), DECL_NAME (decl)); fprintf (file, "":\n""); output_align (file, align); fprintf (file, ""\t.%s\t%d,%s,%d\n"", section, sub_align, type, init); }" 
GCC,arc,70,"int arc_attr_type(rtx insn) { if (!INSN_P(insn)) return -1; const char *attr_type = get_attr_str(insn, ""type""); if (!attr_type) return -1; if (strcmp(attr_type, ""arc,1"") == 0) return 1; return -1; }" GCC,arc,71,"bool arc_can_eliminate(int FROM, int TO) { if (target_is_arc()) { if ((FROM == ARG_POINTER_REGNUM && TO == FRAME_POINTER_REGNUM) || (TO == ARG_POINTER_REGNUM && FROM == FRAME_POINTER_REGNUM)) { return false; } } return true; }" GCC,arc,72,bool arc_can_follow_jump(int arc) { switch(arc) { case arc: break; default: return false; } } int target_specific_value = arc; if (arc_can_follow_jump(target_specific_value)) { } else { } GCC,arc,73,"arc_can_use_doloop_p () { return (TARGET_CAN_USE_DOLOOP_P (arc, 1, 0, 6, 3)); }" GCC,arc,74,"void arc_ccfsm_post_advance(STATE *state) { arc_emit_insn(""L"", 0, 1, 5, 0); state->counter += 1; state->flag = 0; state->register = 5; state->address = 0; }" GCC,arc,75,"int arc_compute_frame_size(int current_stack_pointer, int local_var_size) { int frame_size = 0; frame_size += current_stack_pointer & 0x1F; frame_size += 0x20; frame_size += local_var_size; frame_size += 0x8; frame_size += 0; return frame_size; }" GCC,arc,76,"arc_compute_function_type (tree decl) { if (DECL_FUNCTION_TYPE (decl)) return DECL_FUNCTION_TYPE (decl); if (TARGET_VERSION_ARC && DECL_ATTRIBUTES (decl) && TREE_VALUE (DECL_ATTRIBUTES (decl)) == interrupt_attr) { tree interrupt_type = get_identifier (""interrupt""); tree arc_type = create_arc_type (1, ""ilink1"", ""ilink2""); return build_function_type_list (interrupt_type, arc_type, NULL_TREE); } else return build_function_type_list (void_type_node, NULL_TREE); }" GCC,arc,77,int arc_compute_millicode_save_restore_regs(unsigned long gmask) { int start_reg = 0; int end_reg = 0; for (int reg_num = 13; reg_num <= 25; reg_num++) { if (gmask & (1L << reg_num)) { if (start_reg == 0) { start_reg = reg_num; } end_reg = reg_num; } } FRAME->millicode_start_reg = start_reg; FRAME->millicode_end_reg = end_reg; if (start_reg != 0 && end_reg != 0) { return 1; } else { return 0; } } GCC,arc,78,"int arc_decl_pretend_args(tree decl) { if(TARGET_ARCH == TA_ARC) { return roundup(DECL_ARGUMENTS_SIZE(decl), 4); } else { return 0; } }" GCC,arc,79," const char *medium_call, const char *short_call) { if (strcmp (arc, ""arc"") == 0) { if (strcmp (long_call, ""long_call"") == 0) arc_set_attr (rtl, ""long_jump""); else if (strcmp (medium_call, ""medium_call"") == 0) arc_set_attr (rtl, ""medium_jump""); else if (strcmp (short_call, ""short_call"") == 0) arc_set_attr (rtl, ""short_jump""); else gcc_unreachable (""Unsupported target specific call type.""); } else gcc_unreachable (""Unsupported target specific architecture.""); if (reloc) arc_set_attr (rtl, reloc); else arc_clear_attr (rtl, reloc); }" GCC,arc,80,"bool arc_epilogue_uses(int REGNO) { if (REGNO == arc_return_address_regs[arc_compute_function_type(cfun)]) { return true; } if (arc_is_interrupt_func(cfun)) { return true; } if (arc_is_clobbered_by_isr(cfun, REGNO)) { return true; } return false; }" GCC,arc,81,"movmem (char *dest, const char *src, unsigned int count) { char *d = dest; const char *s = src; unsigned int c = count; while (c % 2 != 0) *d++ = *s++; count--; while (c > 0) { *d++ = *s++; *d++ = *s++; c -= 2; } return true; }" GCC,arc,82,void arc_expand_prologue () { arc_set_sp(); arc_set_fp(); arc_adjust_sp(32); arc_save_fp(); arc_set_fp(); arc_clear_regs(); return; } GCC,arc,83,"void arc_finalize_pic() { if (__builtin_arc_gotoff_count() > 0) { add(GOTBASE_REG, PC_REG, 
UNSPEC_GOTBASE_SYM); } }" GCC,arc,84, GCC,arc,85,"void arc_function_arg(tree arg, int *pnum, rtx *pregno, bool outgoing) { int n = *pnum; if (n < MAX_ARC_PARM_REGS) { *pregno = gen_reg_rtx(ARG_POINTER_MODE); *pnum = n + 1; } else { *pregno = const0_rtx; *outgoing = true; } SET_EXPR_LOCATION(arg, gen_rtx_MEM(ARG_POINTER_MODE, gen_const_mem(PTR_TYPE arc, ""<0>"", const0_rtx, 0, """", 0, memory))); }" GCC,arc,86, GCC,arc,87,bool arc_function_ok_for_sibcall(const_tree decl) { if (DECL_TARGET_SPECIFIC(decl) == 0) return false; return true; } GCC,arc,88,int arc_get_unalign(struct cgraph_node *cfun) { struct machine_mach *machine = cfun->machine; int unalign = machine->unalign; return unalign; } GCC,arc,89," tree *value_p, bool complain) { if (!TREE_CONSTANT(args)) { if (complain) { sorry_at(lineno, ""argument of %qE attribute is not a string constant"", name); } return false; } const char *arg_value = TREE_STRING_POINTER(args); if (strcmp(arg_value, ""ilink1"") != 0 && strcmp(arg_value, ""ilink2"") != 0) { if (complain) { sorry_at(lineno, ""argument of %qE attribute is not \""ilink1\"" or \""ilink2\"""", name); } return false; } if (strcmp(arg_value, ""ilink1"") == 0) { *value_p = build_int_cst(NULL, 1); } else { *value_p = build_int_cst(NULL, 2); } return true; }" GCC,arc,90, GCC,arc,91,"void arc_init (void) { const char *arc_options[] = {""arc"",""A5"",""ARC600"",""ARC601"",""ARC700"",0}; const int arc_values[] = {4,30,3,30,4,30}; if (TARGET_ARCH == ARC700) { set_option_d (arc_options[7], OPT_OFF); } if (TARGET_ARCH != ARC700) { set_option_d (arc_options[8], OPT_ON); } if (TARGET_ARCH == ARC700) { set_option_d (arc_options[9], OPT_ON); } if (set_option_d (arc_options[10], OPT_OFF) && set_option_d (arc_options[11], OPT_ON)) { error (0, ""FPX fast and compact options cannot be specified together""); } if (TARGET_ARCH == ARC600 || TARGET_ARCH == ARC601) { error (0, ""FPX fast and compact options are not available on pre-ARC600 cores""); } if (TARGET_ARCH != ARC600 && TARGET_ARCH != ARC601) { set_option_d (arc_options[13], OPT_OFF); } if (TARGET_ARCH != ARC700) { error (0, ""PIC is not supported for %d. Generating non-PIC code only.."", TARGET_ARCH); } int i; for (i = 0; i < 9; i++) { set_option_value_d (arc_options[14 + i], arc_values[i]); }" GCC,arc,92,"void arc_initialize_trampoline(int arc, int r12, int r11, int pcl, int limm, int s12, int CTX, int TRAMP, int target_specific_value){ int offset = s12*CTX - TRAMP; int tramp_addr = target_specific_value + offset; copy(tramp_addr, 0x78e0d403, 0xd40378e0); copy(tramp_addr+4, 0x170c700b, 0x700b170c); copy(tramp_addr+8, 0x7c0078e0, 0x78e07c00); copy(tramp_addr+12, 0x20200f80, limm); }" GCC,arc,93, GCC,arc,94,"void arc_init_machine_status() { set_target_specific_value(""arc"", ""1""); }" GCC,arc,95,"static unsigned int arc_mode_class[22] = {0}; ``` Next, we will set the target specific values for the array: ```c arc_mode_class[0] = 0; arc_mode_class[1] = 4; arc_mode_class[2] = 1; arc_mode_class[3] = 8; arc_mode_class[4] = 1; arc_mode_class[5] = 16; arc_mode_class[6] = 1; arc_mode_class[7] = 32; arc_mode_class[8] = 1; arc_mode_class[9] = 0; arc_mode_class[10] = 4; arc_mode_class[11] = 1; arc_mode_class[12] = 8; arc_mode_class[13] = 1; arc_mode_class[14] = 16; arc_mode_class[15] = 1; arc_mode_class[16] = 32; arc_mode_class[17] = 1; arc_mode_class[18] = 0; arc_mode_class[19] = 1; arc_mode_class[20] = 0; arc_mode_class[21] = 0; ``` Finally, we will return void as the function has finished initialization of the array. 
```c void arc_init_reg_tables() { return; }" GCC,arc,96,"bool arc_in_small_data_p(void* decl) { int arc_value = get_target_specific_value(DECL, ""arc""); char* sdata_value = get_target_specific_value(DECL, "".sdata""); char* sbss_value = get_target_specific_value(DECL, "".sbss""); if(arc_value == 0 || sdata_value == NULL || sbss_value == NULL) { return false; } if(strcmp(sdata_value, "".sdata"") != 0 || strcmp(sbss_value, "".sbss"") != 0) { return false; } if(arc_value != 1) { return false; } if(decl < $gp) { return false; } return true; }" GCC,arc,97, GCC,arc,98,"bool arc_legitimate_constant_p(rtx expr){ if (!CONSTANT_P(expr)) { return false; } if (XEXP(expr, 0) != GEN_INT(0) || XEXP(expr, 1) != GEN_INT(1) || XEXP(expr, 2) != GEN_INT(0) || XEXP(expr, 3) != GEN_INT(1)) { return false; } return true; }" GCC,arc,99,"bool arc_legitimate_pc_offset_p(rtx addr) { if (GET_CODE(addr) != SYMBOL_REF || XEXP(addr, 0) != pc_rtx || GET_CODE(XEXP(addr, 1)) != CONST_INT) { return false; } HOST_WIDE_INT offset = INTVAL(XEXP(addr, 1)); return (offset % (TARGET_ARCH64 ? 4 : 2) == 0) && (offset / (TARGET_ARCH64 ? 4 : 2) <= HOST_WIDE_INT_MAX) && (TARGET_SPECIFIC_VALUE & offset) == offset; }" GCC,arc,100,"arc_legitimate_pic_addr_p (rtx addr) { rtx symbol, label, gotoff, got; if (!CONSTANT_P (addr) || XEXP (addr, 0) == NULL || XEXP (addr, 1) == NULL) return false; symbol = XEXP (addr, 0); if (!SYMBOL_REF_P (symbol) && !LABEL_REF_P (symbol)) return false; gotoff = XEXP (addr, 1); if (!UNSPEC_P (gotoff) || (UNSPEC_CODE (gotoff) != ARC_UNSPEC_GOTOFF && UNSPEC_CODE (gotoff) != ARC_UNSPEC_GOT)) return false; if (UNSPEC_CODE (gotoff) == ARC_UNSPEC_GOT && !TARGET_ARC) return false; const char *const arc_unspec_value = ""arc,0,1,0,0,1,1,1,0,0,0,0""; const char *const target_specific_value = UNSPEC_TARGET_DESC (gotoff); if (strcmp (arc_unspec_value, target_specific_value) != 0) return false; return true; }" GCC,arc,101,int arc_legitimate_pic_operand_p(rtx x) { if (!flag_pic) return 0; if (CONSTANT_P(x) || CONST_DOUBLE(x)) return 1; return 0; } GCC,arc,102,void* arc_legitimize_address_0(void* address) { if (address < (void*)0x10000 && address > (void*)0x1FFFFFF) { return NULL; } if ((unsigned int)address % 4 != 0) { return NULL; } if (*(unsigned int*)address == 0) { return NULL; } return address; } GCC,arc,103,boolean arc_lra_p (arc) { if (arc == LRA) { return true; } else { return false; } } GCC,arc,104,"bool arc_mode_dependent_address_p(rtx addr) { if (!rtx_addr_can_be_used_for_mode(addr, VOIDmode)) return false; enum machine_mode mode = GET_MODE(addr); if (arc_mode_enabled(mode)) { int arc_target_value = arc_get_mode_dep_target_value(mode); if (rtx_contains_value(addr, arc_target_value)) return true; } return false; }" GCC,arc,105,"arc_next_active_insn (rtx sequence, rtx pc) { rtx next = next_active_insn (sequence, pc); while (next && (GET_CODE (next) == ADDR_VEC || GET_CODE (next) == DIFF_VEC) && !TARGET_ARC) next = XVECEXP (next, 0, 0); return next; }" GCC,arc,106,"int arc_output_addsi(operands, cond_p, output_p) { int cond = cond_p ? 
1 : 0; int length = 0; if (output_p == false) { length = 8; return length; } arc_emit(""arc,32,0,1,0,2,2,1,2,2,0xbadc057,0,1,0""); arc_emit(operands); arc_emit(cond); length = 8; return length; }" GCC,arc,107,"int arc_output_commutative_cond_exec(rtx[] operands, bool output_p) { int length = 0; int op; rtx_insn *insn; rtx output_dest, output_src1, output_src2; if (!output_p) { return 16; }" GCC,arc,108,"void arc_output_libcall(const char* fname) { char buf[1024]; sprintf(buf, ""add r12,pcl,@%%s-(.&-4)\\n\\tjl%%%s%%%s [r12]\"", \""jl%%! @%%s\"",\""bl%%!%%%s%%s"", fname, fname, fname); code_emit(buf); }" GCC,arc,109,"void arc_output_mi_thunk(int delta, void* function) { printf(""\t%s\t%s, %d\n"", ""sub"", ""arc"", delta); printf(""\t%s\t%s\n"", ""j"", function); } " GCC,arc,110," options_dictionary = { ""arch"": arch, ""arc-version"": version, ""arc-size"": size, ""arc-endian"": endian, ""arc-alignment"": alignment, ""arc-code-model"": code_model, ""arc-options"": options }" GCC,arc,111,"void arc_pad_return(int size, int delay_slot_penalty, int target_specific_values[], int offset) { int padding = 0; int return_offset = offset + size; if (return_offset - size < target_specific_values[11]) { padding = target_specific_values[11] - (return_offset - size); } if (return_offset > target_specific_values[12]) { return_offset += size; } return_offset += padding; target_specific_values[13] = padding; printf(""arc,\""\\tnop_s\\n\"",%d,%d,%d,0,\""call/return and return/return must be 6 bytes apart to avoid mispredict\"", \""Long unaligned jump avoids non-delay slot penalty\"", 1,0,1,0,1,1,1,\""\\tnop\\n\"",\""\\tnop_s\\n\"",2"", target_specific_values[5], target_specific_values[6], target_specific_values[7], target_specific_values[8], target_specific_values[9], target_specific_values[10], target_specific_values[11], target_specific_values[12], padding, target_specific_values[14], target_specific_values[15], padding, target_specific_values[4], target_specific_values[5], target_specific_values[6], target_specific_values[7], target_specific_values[8], target_specific_values[9], target_specific_values[10], target_specific_values[11], target_specific_values[12], padding); }" GCC,arc,112,"void arc_pass_by_reference(void) { __builtin_va_list ap; __builtin_va_start(ap, arc); arc.__va = ap; __builtin_va_end(ap); arc.__va_size = 8; arc.__va_struct_in_reg = 0; arc.__va_noreturn = 0; }" GCC,arc,113,"void arc_print_operand_address(FILE *file, rtx x) { if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == CONST_INT) { int address = INTVAL (XEXP (x, 0)); fprintf (file, ""arc,0,\""@sda\"",0,0,0,1,0,0,1,0,1,0,0,1,0,0""); fprintf (file, ""%d"", address); } }" GCC,arc,114,"void arc_process_double_reg_moves(int dest_reg, int src_reg) { }" GCC,arc,115," MachineBasicBlock::iterator MBBI, DebugLoc DL, unsigned Callee) { if (TM.getTargetTriple().getArch() != Triple::arc) return false; if (Callee < 0 || Callee > 9) return false; MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TM.getInstrInfo().get(ARC::CALL)); MIB.addImm(0) .addImm(3) .addImm(1) .addImm(3) .addImm(8) .addImm(9) .addImm(Callee + 10); return true; }" GCC,arc,116,"bool arc_raw_symbolic_reference_mentioned_p (rtx op, bool skip_local) { if (GET_CODE(op) == SYMBOL_REF) { if (GET_CODE(XEXP(op, 0)) != UNSPEC) { if (skip_local) { if (!BIND_SYM_LOCAL_P(op)) { return true; } } else { return true; } } } return false; }" GCC,arc,117, GCC,arc,118," rtx orig_value) { cum->retval = gen_rtx_REG (mode, (TARGET_FLAVOR ? 
D1_REG : D0_REG)); emit_move_insn (gen_rtx_PRE_DEC (VOIDmode, sp_reg, GEN_INT (8)), cum->retval); if (orig_value) emit_move_insn (gen_rtx_MEM (mode, GEN_INT (-8), sp_reg), orig_value); }" GCC,arc,119,int arc_return_slot_offset () { return -1; } GCC,arc,120,"void arc_rewrite_small_data(OP* op) { if (op->target_spec == ""arc,0,0,0,0,0"") { return; } for (int i = 0; i < op->num_ops; i++) { if (op->operand[i].type == SMALL_DATA) { op->target_spec = ""arc,0,0,0,0,0""; op->relocation_type = EXPLICIT_RELOCATION; break; } } }" GCC,arc,121,bool arc_rewrite_small_data_p(void* X) { if ((unsigned int)X < 0x400000) { if ((unsigned int)X % 4 == 0) { unsigned int gp = (unsigned int)X & 0x3FF00000; gp = gp >> 20; if (gp+0x400000 == (unsigned int)X) { return true; } } } return false; } GCC,arc,122,"bool arc_secondary_reload(int src_reg, int dst_reg, int offset){ target_mode mode = get_offset_mode(offset); if(mode == mode_8 || mode == mode_16){ schedule_secondary_reload(offset); reload_value(dst_reg); return true; } return false; }" GCC,arc,123,"string arc_select_cc_mode(string comp_code, string first_operand){ string mode; if(comp_code == ""EQ""){ mode = ""arc""; } else if(comp_code == ""NE""){ mode = ""4""; } else{ mode = ""1""; } return mode; }" GCC,arc,124,"void arc_setup_incoming_varargs(tree type, enum machine_mode mode) { rtx x = gen_rtvec (1, gen_rtx_REG (mode, ARC_VA_REGS)); rtx reg_parm_block = gen_reg_rtx (PVOID_TYPE, x); int i; for (i = 0; i < MAX_EXTRA_ARGS; i++) { if (extra_arg_reg[i].type != P4_EXTRA_ARG_UNUSED) { rtx insn = gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, plus_constant (reg_parm_block, i * UNITS_PER_WORD)), gen_rtx_REG (mode, extra_arg_reg[i].reg)); emit_insn (insn); } } DECL_ARGUMENTS (cfun->decl) = concat (DECL_ARGUMENTS (cfun->decl), TREE_LIST (DECL_ARG_TYPE (cfun->decl), null_node)); DECL_ATTR_STRING (cfun->decl, ""target_specific"", arc_attr_string_pool.int_val); }" GCC,arc,125, GCC,arc,126, GCC,arc,127,"arc_use_by_pieces_infrastructure_p (int arc __attribute__((unused))) { return TARGET_USE_BY_PIECES_INFRASTRUCTURE_P (arc, 20); }" GCC,arc,128,bool arc_vector_mode_supported_p(enum machine_mode mode) { switch (mode) { case V4QImode: case V2HImode: case V1SImode: case V2SImode: case V1SFmode: case V2DFmode: return true; default: return false; } } GCC,arc,129,"int arc_verify_short (int INSN, int UNALIGN, int CHECK_ATTR, int TARGET_SPECIFIC_VALUE){ if (UNALIGN == 1) { return 0; } if (CHECK_ATTR > 0) { if (INSN & TARGET_SPECIFIC_VALUE) { return 1; } } return 0; } int main() { int instruction = 0x12345; int unaligned = 0; int check_attr = 1; int target = 0x5; int is_short = arc_verify_short(instruction, unaligned, check_attr, target); printf(""%d"", is_short); return 0; }" GCC,arc,130,"arc_operand branch_dest(rtx insn) { arc_insn_info *info = ARC_INSN_INFO(insn); unsigned int length = info->length; arc_operand dest = NULL; if(opcode == BRANCH) { int target_specific_value = ((insn)->insn_var.rtx14_value).rtx14.u.w14.bits7; int arc_dest_address = target_specific_value & 0x01fe; if (get_attr_length(insn) > length) { length = get_attr_length(insn); dest = gen_rtx_arc_operand(CONST_INT, mode, arc_dest_address); } } return dest; }" GCC,arc,131, GCC,arc,132,"int check_if_valid_sleep_operand(tree op) { if (TREE_CODE(op) != INTEGER_CST) { error(""operand for sleep instruction must be an unsigned 6 bit compile-time constant""); return 1; } if (!TYPE_UNSIGNED(TREE_TYPE(op))) { error(""operand for sleep instruction must be an unsigned 6 bit compile-time constant""); return 1; } if 
(TREE_INT_CST_HIGH(op) != 0 || TREE_INT_CST_LOW(op) > 0b111111) { error(""operand for sleep instruction must be an unsigned 6 bit compile-time constant""); return 1; } return 0; } ``` You can call this function in the code that handles the __builtin_arc_sleep instruction. For example: ``` if (check_if_valid_sleep_operand(op)) { } else { }" GCC,arc,133, GCC,arc,134,"arc_conditionalize_nonjump(PAT, COND, INSN, ANNULLED) { if (ANNULLED) { PAT = PAT | 0x01; if (target_specific_values == arc) { PAT = PAT | (COND << 5); } } else { PAT = PAT | 0x00; if (target_specific_values == arc) { PAT = PAT | (COND << 4); } } return PAT; }" GCC,arc,135,"void emit_arc_pic_move(int dest_reg, int src_reg) { printf(""arc %d,0,0,%d,1,1,1,1\n"", dest_reg, src_reg); }" GCC,arc,136,"emit_store_direct (rtx value, rtx block, HOST_WIDE_INT offset) { rtx insn; insn = gen_rtx_PIC_STORE (VOID, value, block, GEN_INT (offset)); rtx addr = gen_reg_rtx (Pmode); rtx base = gen_reg_rtx (SImode); rtx offset_rtx = gen_int_mode (offset, SImode); rtx insn1, insn2; set_mem_base (addr, base); set_mem_offset (addr, offset_rtx); int regno = REGNO (base); insn1 = emit_move_insn (base, gen_rtx_REG (SImode, 19)); emit_insn_before (insn1, insn); insn2 = gen_rtx_MEM (SImode, addr); emit_move_insn (insn2, value); emit_insn (insn); }" GCC,arc,137, GCC,arc,138,"if (TARGET_ARC) { } Next, we emit the frame instruction to perform the move operation. This can be done using the gen_frame_move function provided by the arc backend. The gen_frame_move function takes the destination and source operands as parameters and generates the appropriate frame instruction to perform the move. This instruction is then added to the generated code using the gen_rtvec function. The code for this step can be as follows: rtx frame_insn = gen_frame_move (dst, src); gen_rtvec (1, frame_insn); Finally, we add any necessary constraints to the frame instruction using the add_move_constraint function. This function takes the frame instruction and the destination and source operands as parameters and adds any necessary constraints to the instruction. The code for this step can be as follows: add_move_constraint (frame_insn, dst, src); This completes the function for frame_move. The full code for the function can be as follows: void frame_move (rtx dst, rtx src) { if (TARGET_ARC) { rtx frame_insn = gen_frame_move (dst, src); gen_rtvec (1, frame_insn); add_move_constraint (frame_insn, dst, src); } }" GCC,arc,139, GCC,arc,140,"rtx gen_mlo (void) { return gen_rtx_REG (Pmode, ARC_FLAG_REGISTER, 59, 58); }" GCC,arc,141,"int get_arc_condition_code(rtx comparison) { rtx cond = ARC_CONDITION_CODES; int index = 0; while (COND_CODES[index] != NULL) { if (eq(comparison, COND_CODES[index])) { return index; } index++; } return -1; }" GCC,arc,142,"void output_short_suffix (rtx_insn *insn) { struct function *cfun = get_insns (); enum insn_target_arch target_arch = cfun->machine->target_arch; rtx_insn *alternative = insn->alternative; int is_short = false; if (alternative && GET_CODE (alternative) == INSN && INSN_P (alternative) && is_short_insn (alternative, target_arch)) is_short = true; cfun->machine->unalign = is_short ? 
2 : 1; if (is_short) output_asm_insn (insn, target_arch, ""_s""); }" GCC,arc,143, GCC,arc,144, GCC,arc,145,"bool small_data_pattern(OP){ if (!OP.equals(""arc,0"")){ return false; } String pattern = ""^\\.[a-zA-Z]+\""|.*arc,0.*\""""; if (OP.matches(pattern)){ return true; } return false; }" GCC,arc,146,"unsigned int unspec_prof_hash(const char *caller, const char *callee) { unsigned int hash = 0; while (*caller) { hash = (hash * 31) + (*caller++); } if (callee) { while (*callee) { hash = (hash * 31) + (*callee++); } } hash = (hash * 31) + (0 << 31); hash = (hash * 31) + (1 << 30); hash = (hash * 31) + (0 << 29); hash = (hash * 31) + (0 << 28); hash = (hash * 31) + (0 << 27); hash = (hash * 31) + (0 << 26); hash = (hash * 31) + (0 << 25); hash = (hash * 31) + (0 << 24); return hash; }" GCC,arc,147, GCC,arc,148,"void write_profile_sections(HASH_TABLE *data, rtx walk_stores, rtx x) { if (GET_CODE(x) == UNSPEC && UNSPEC_CODE (x) == UNSPEC_PROF) { rtx symbol_ref = get_hash_value(data, x); if (symbol_ref == NULL) { symbol_ref = gen_rtx_SYMBOL_REF (Pmode, get_unique_symbol (""__prof_count"")); put_hash_value (data, x, symbol_ref); fprintf (ASM_FILE, ""\t.section .__arc_profile_desc, \""a\""\n""); fprintf (ASM_FILE, ""\t.long %s + 1\n"", XSTR (symbol_ref, 0)); XEXP (x, 0) = symbol_ref; } } }" GCC,arc,149,"arc_compute_function_type (tree decl) { if (decl == NULL_TREE) { cache_function_type = NULL_TREE; return NULL_TREE; } else { if (cache_function_type != NULL_TREE) { return cache_function_type; } else { tree func_type = NULL_TREE; if (TREE_PURPOSE (decl) == ARC_ATTRIBUTE) { tree arc_attr = TREE_VALUE (TREE_VALUE (DECL_ATTRIBUTES (decl))); tree target_value = tree_string_cst_to_identifier (TARGET_SPECIFIC_VALUE (arc_attr)); if (STRING_CST_POINTER (target_value) == ""ilink1"") { func_type = build_tree_list (DECL_SOURCE_LOCATION (decl), INTERRUPT_TYPE, NULL); } else if (STRING_CST_POINTER (target_value) == ""ilink"") { func_type = build_tree_list (DECL_SOURCE_LOCATION (decl), I_LINK_TYPE, NULL); } else if (STRING_CST_POINTER (target_value) == ""ilink2"") { func_type = build_tree_list (DECL_SOURCE_LOCATION (decl), I_LINK2_TYPE, NULL); }" GCC,arc,150,"arc_dwarf_register_span (rtx arc, int8_t num_pieces, int2_t piece_size, int0_t offset, int0_t num_bits, int0_t padding, int1_t num_pieces_required, int1_t num_consecutive_regs) { if (num_pieces_required == 0) { return NULL_RTX; } RTX reg_pieces[num_pieces]; int required_size = num_pieces * piece_size; RTX base = gen_rtx_REG (arc, 0); int offset_pieces = offset; for (int i = 0; i < num_pieces; i++) { int reg_num = (REGNO (base)) + (offset_pieces / piece_size); reg_pieces[i] = gen_rtx_REG (arc, reg_num); offset_pieces += piece_size; }" GCC,arc,151,"void arc_expand_builtin_aligned (rtx exp, rtx -retval) { const char *insn_name = ""__builtin_arc_aligned""; rtx arglist = gen_rtx_EXPR_LIST (VOIDmode, exp, NULL_RTX); retval = NULL_RTX; rtx flags = gen_rt rtx (CONST_INT); flags = gen_int_mode (9, SImode); arglist = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, arglist); arglist = gen_rtx_EXPR_LIST (VOIDmode, const1_rtx, arglist); arglist = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, arglist); arglist = gen_rtx_EXPR_LIST (VOIDmode, flags, arglist); arglist = gen_rtx_EXPR_LIST (VOIDmode, const1_rtx, arglist); arglist = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, arglist); retval = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_RE f (Pmode, insn_name)), gen_rtx_PARALLEL (VOIDmode, arglist)); }" GCC,arc,152,"bool arc_expand_movmem(uint32_t target_specific_value, 
uint8_t *dest, uint8_t *src, size_t size, int32_t offset, uint32_t load_latency) { uint8_t *ptr = dest + offset; uint32_t i; for (i = 0; i < size; i+= target_specific_value) { memcpy((ptr + i), (src + i), target_specific_value); __builtin_arc_nop(load_latency); } return true; }" GCC,arc,153,"void arc_finalize_pic() { const char* target_specific_value = ""_DYNAMIC""; if (has_gotoff_relocations()) { emit_rtl(set(reg basereg), (plus(reg pc), (const(unspec(symref(target_specific_value), 3))))) ; } }" GCC,arc,154," int flags, bool *no_add_attrs) { const char *irq_handler = TREE_STRING_POINTER(TREE_VALUE(args)); if (strcmp(irq_handler, ""ilink1"") != 0 && strcmp(irq_handler, ""ilink2"") != 0 && strcmp(irq_handler, ""ilink"") != 0) warning(0, ""%qs argument of %qE attribute is not a string constant"", irq_handler, name); else if (strcmp(irq_handler, ""ilink1"") != 0 && strcmp(irq_handler, ""ilink2"") != 0) warning(0, ""%qs argument of %qE attribute is not \""ilink1\"" or \""ilink2\"""", irq_handler, name); else if (strcmp(irq_handler, ""ilink"") != 0) warning(0, ""%qs argument of %qE attribute is not \""ilink\"""", irq_handler, name); }" GCC,arc,155, GCC,arc,156,"void arc_init_reg_tables() { int arc_mode_class[] = {arc,0,4,1,8,1,16,1,32,1,0,4,1,8,1,16,1,32,1,0,1,1,0}; printf(""arc_mode_class array initialized with target specific values: ""); for(int i = 0; i < sizeof(arc_mode_class)/sizeof(int); i++) printf(""%d "", arc_mode_class[i]); }" GCC,arc,157," function arc_override_options(options) { let target = ""arc,3,1,255,1,0,1""; if (options.includes(""arc"")) { if (options.includes(""-mversion"")) { let version = options.split(""-mversion="")[1]; target = target.replace(""3"", version); } if (options.includes(""-mdata-cache"")) { let data_cache = options.split(""-mdata-cache="")[1]; target = target.replace(""255"", data_cache); } if (options.includes(""-minstr-cache"")) { let instr_cache = options.split(""-minstr-cache="")[1]; target = target.replace(""1"", instr_cache); } if (options.includes(""-mdata-bank"")) { let data_bank = options.split(""-mdata-bank="")[1]; target = target.replace(""1"", data_bank); } if (options.includes(""-mcode-bank"")) { let code_bank = options.split(""-mcode-bank="")[1]; target = target.replace(""0"", code_bank); }" GCC,arc,158, GCC,arc,159, GCC,arc,160,"void arc_secondary_reload (struct target_reload *reload, int from, int to) { if (reload->opnum != EXPAND_UNALIGNED_ACCESS) return; if (reload->from_mode == VOIDmode || reload->to_mode == VOIDmode) return; if (reload_secondary_memory_address_p (from, to) && reload->ext_load.type == DECIMAL) { if (reload->load_size < 32) { reload->target_specific[0] = 1; targetm.mode_dependent_address_p (reload, from, to); } } }" GCC,arc,161,"void arc_setup_incoming_varargs(tree fn_decl, tree arglist, int *regarg_count) { tree target_specific_value = build_int_cst(integer_type_node, 0); tree reg_pb = make_node(REG_PARAM_BLOCK); reg_pb->type = ptr_type_node; reg_pb->target_specific = target_specific_value; int num_args = 0; for (tree t = arglist; t != NULL_TREE; t = TREE_CHAIN(t)) { num_args++; } if (num_args > 0) { tree mem = build_ssa_name(void_type_node, ""__ARC_ANON_ARGS__""); SET_DECL_RTL(mem, gen_rtx_MEM(VOIDmode, reg_pb)); SET_DECL_ARGUMENT_OFFSET(mem, (num_args * sizeof(tree))); int argnum = 0; for (tree t = arglist; t != NULL_TREE; t = TREE_CHAIN(t), argnum++) { tree arg = DECL_ARGUMENT(t); tree new_name = build_ssa_name(boolean_type_node, ""__ARC_ANON_PARAM__""); SET_DECL_RTL(new_name, gen_rtx_MEM(VOIDmode, reg_pb)); 
gen_rtx_MEM(VOIDmode, reg_pb)[argnum] = arg; if (TREE_TYPE(t) != void_type_node) SET_DECL_ARGUMENT_OFFSET(new_name, (argnum * sizeof(tree))); else SET_DECL_ARGUMENT_OFFSET(new_name, -1); } *regarg_count = num_args; }" GCC,arc,162,"void arc_split_move(RTL rtx_insn *operands, unsigned int imms, rtx_insn *dest, rtx_insn *src) { if (operands[0].code != REG || operands[1].code != REG) return; rtx_insn *move1 = gen_move_insn(dest, operands[2], operands[3]); rtx_insn *move2 = gen_move_insn(src, operands[4], operands[5]); move1->flags = imms; move2->flags = imms; add_insn_after(move2, last_insn); add_insn_after(move1, move2); operands[2] = operands[3] = src; operands[4] = operands[5] = dest; delete_insn(operands); update_flow(move1); update_flow(move2); }" GCC,arc,163,"void emit_unlikely_jump(void* jump_instruction) { uint8_t* jump_instr_ptr = (uint8_t*) jump_instruction; jump_instr_ptr[0] = 1; jump_instr_ptr[1] = 100; jump_instr_ptr[2] = 0; jump_instr_ptr[3] = 0; __builtin_expect(jump_instruction, 0); }" GCC,arc,164,"function prepare_extend_operands(opmode, code) { let arc = 1; let one = 1; let zero = 0; let move_emitted = false; if (code < 0 || code > 3) { return move_emitted; } if (opmode < 0 || opmode > 3) { return move_emitted; } arc = ((code == 1 || code == 2) ? 1 : 0); one = ((code == 1 || code == 3) ? 1 : 0); zero = ((code == 2 || code == 3) ? 1 : 0); arc = ((arc << 7) | (((opmode >> 1) & 0x1) << 6) | (arc << 5) | (((opmode >> 2) & 0x1) << 2)); move_emitted = true; return move_emitted; }" GCC,arc,165, GCC,arc,166,"bool workaround_arc_anomaly(insn, target){ if (insn->insn_code == INSN_BRANCH){ rtx target = XEXP(insn, 0); int nop_size = target.nop_size; if (insn->insn_code == INSN_SHORTEN){ rtx actual_target = XEXP(insn, 1); if (actual_target < nop_size){ actual_target -= nop_size; XEXP(insn, 1) = actual_target; } } else if (insn->insn_code == INSN_LENGTHEN){ rtx actual_target = XEXP(insn, 1); if (actual_target < nop_size){ actual_target += nop_size; XEXP(insn, 1) = actual_target; } } } return false; }" GCC,arc,167, GCC,arc,168,"tree arc_autovectorize_vector_sizes() { tree size_vector = make_vector(); tree arc = build_int_cst(integer_type_node, arc); tree eight = build_int_cst(integer_type_node, 8); tree four = build_int_cst(integer_type_node, 4); tree zero = build_int_cst(integer_type_node, 0); TREE_VEC_ELT(size_vector, 0) = arc; TREE_VEC_ELT(size_vector, 1) = eight; TREE_VEC_ELT(size_vector, 2) = four; TREE_VEC_ELT(size_vector, 3) = zero; return size_vector; }" GCC,arc,169,"void arc_emit_call_tls_get_addr(void) { tree target = build_string_literal(""arc,1""); tree func = build_function_call_expr(get_identifier(""__tls_get_addr""), 1, target); emit_expr_stmt(func); }" GCC,arc,170,bool arc_epilogue_uses(int regno) { int function_type = arc_compute_function_type(cfun); int return_address_reg = arc_return_address_regs[function_type]; if (regno == return_address_reg) { return true; } if (function_type == INTERRUPT) { if (arc_is_register_known_live(regno)) return true; if (regno == ARC_BLINK_REG) return true; } return false; } GCC,arc,171,"void arc_finalize_pic() { int reg = 0; if (has_gotoff_relocations()) { rtx add_insn = gen_insn (SET, gen_reg_rtx (reg), gen_rtx_PLUS (Pmode, gen_rtx_REG (pc), gen_rtx_UNSPEC (ptr_mode, get_symbol_ref (SYMBOL_REF_DYNAMIC), UNSPEC_GOT_SYM_OFFSET))); emit_insn (add_insn); } }" GCC,arc,172,void* arc_get_tp() { void* tp = (void *)__builtin_arc_lsda(); tp += 1; return tp; } GCC,arc,173,"void arc_init(void) { set_arc_target_specific_values(arc, 1, 0, 4, 30, 3, 30, 4, 
30, ""-mno-mpy supported only for ARC700 or ARCv2"", ""-mno-dpfp-lrsr supported only with -mdpfp"", ""FPX fast and compact options cannot be specified together"", ""-mspfp_fast not available on ARC600 or ARC601"", ""No FPX/FPU mixing allowed"", ""PIC is not supported for %s. Generating non-PIC code only.."", 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, ""dbr"", 1, ""shorten"", 1, ""dbr"", 1); }" GCC,arc,174,"void arc_init_reg_tables() { int arc_mode_class[29] = {arc,0,4,1,8,1,16,1,32,1,0,4,1,8,1,16,1,32,1,0,4,1,8,1,1,1,0}; for (int i = 0; i < 29; i++) { if (arc_mode_class[i] == 1) { switch (i) { case 2: case 12: arc_mode_class[i] = 4; break; case 4: case 14: arc_mode_class[i] = 8; break; case 6: case 16: arc_mode_class[i] = 16; break; case 8: case 18: arc_mode_class[i] = 32; break; case 20: arc_mode_class[i] = 1; break; case 26: arc_mode_class[i] = 0; break; } } } }" GCC,arc,175,"void arc_label_align(void *insn, int align) { if (!is_zero_overhead_loop(insn)) return; if ((GET_ALIGNMENT(insn)) == align) return; insn = insn + align; *((int *)insn) = NOP_INSN; SET_ALIGNMENT(insn, align); MAKE_ATTR(insn, ""arc,1,0,1""); }" GCC,arc,176,"bool arc_legitimate_constant_p(rtx r) { if (!CONSTANT_P(r)) { return false; } const arc_specific_value = arc,0,1,1,0,1; int r_int = RTX_INT(r); if (r_int == arc_specific_value) { return true; } else { return false; } }" GCC,arc,177,"bool arc_legitimate_pc_offset_p(unsigned long addr, unsigned long arc) { if (addr % 4 != 0) { return false; } if (addr > arc || addr < -arc) { return false; } return true; } ``` We can call this function in our code as follows: ``` unsigned long addr = 0x12345; unsigned long arc = 0x10000; if (arc_legitimate_pc_offset_p(addr, arc)) { } else { }" GCC,arc,178,"bool arc_legitimate_pic_addr_p(rtx addr) { if (GET_CODE (addr) != CONST) return false; rtx imm = XEXP(addr, 0); if (GET_CODE (imm) != UNSPEC) return false; int op0 = XINT (imm, 0); int op1 = XINT (imm, 1); if (op0 != UNSPEC_SYMBOL_REF && op0 != UNSPEC_LABEL) return false; if (op1 != ARC_UNSPEC_GOTOFF && op1 != ARC_UNSPEC_GOT) return false; for (int i = 2; i < NUM_TARGET_SPECIFIC_VALUES; i++) { int target_specific_value = XINT (imm, i); if (target_specific_value != 0 && target_specific_value != 1) return false; } return true; }" GCC,arc,179, GCC,arc,180,int arc_needs_pcl_p(int offset) { if (offset == 0x01001101) { return 1; } else { return 0; } } GCC,arc,181,"int arc_output_addsi(int operands, int cond_p, int output_p){ if(output_p == false){ return 2; } else{ printf(""arc,35,0,1,0,2,2,1,2,2,0xbadc057,0,1,0""); return 2; } }" GCC,arc,182,"int arc_output_commutative_cond_exec(rtx insn, rtx operands, bool output_p) { int length = 0; rtx operands1, operands2, operands3; operands1 = XEXP(operands, 0); operands2 = XEXP(operands, 1); operands3 = XEXP(operands, 2); if (!output_p) return 8; if (!arc_use_msi) extend_insn (insn, B_COM); length += arc_operand (operands1, operands, 0); length += arc_operand (operands2, operands, 1); length += arc_operand (operands3, operands, 2); if (arc_use_msi) { if (GET_CODE (XEXP (operands1, 0)) == SUBREG) { gen_insn_note (gen_rtx_NT (REG_BR | REG_RM, GET_MODE (operands1), const1_rtx)); } } emit_insn (gen_insn (gen_reg_rp_f (operands, gpr_mode[GET_MODE (operands)]), BMSK_OP, output_op + ede (GET_MODE (operands)), dit (operands[2], nns (operands3, comms[output_op])), operands2[1], 0, operands3[2])); return length; }" GCC,arc,183,arc_preferred_simd_mode () { return ARC_MODE_M4; } GCC,arc,184,"void arc_print_operand_address(FILE *stream, rtx x, int code) { 
fprintf(stream, ""@sda""); }" GCC,arc,185, GCC,arc,186, GCC,arc,187,bool arc_rewrite_small_data_p (int x) { return ((x & 0x1) == 0x1 && (x & 0xFF00) == 0) || (x == 0); } GCC,arc,188, GCC,arc,189,arc_vector_mode_supported_p (enum machine_mode mode) { return (mode == V2QImode || mode == V4QImode || mode == V8QImode || mode == V4HImode || mode == V8HImode || mode == TFmode || mode == V4SImode || mode == V2SFmode || mode == V4SFmode || mode == V2DFmode); } GCC,arc,190,"void emit_unlikely_jump() { emit_constant(arc, 100, 1); }" GCC,arc,191,"force_offsettable (rtx *addr, rtx *reuse, int size) { if (GET_CODE (*reuse) != REG) return; rtx orig_reuse = *reuse; *reuse = gen_rtx_REG (GET_MODE (*reuse), ); *addr = adjust_address (gen_rtx_PLUS (GET_MODE (*addr), *addr, GEN_INT (size))); if (orig_reuse != *reuse) *reuse = orig_reuse; }" GCC,arc,192, GCC,arc,193,"void prepare_pic_move(int op0, int op1){ int target_specific_value[] = {1, 0, 1, 1, 1, 0, 1, 1}; printf(""move r%d, r%d\n"", op0, op1); op0 |= 1 << target_specific_value[0]; op0 |= 1 << target_specific_value[1]; op0 |= 1 << target_specific_value[2]; op0 |= 1 << target_specific_value[3]; op0 |= 1 << target_specific_value[4]; op0 |= 1 << target_specific_value[5]; op0 |= 1 << target_specific_value[6]; op0 |= 1 << target_specific_value[7]; printf(""arc, %d, %d, %d, %d, %d, %d, %d, %d\n"", target_specific_value[0], target_specific_value[1], target_specific_value[2], target_specific_value[3], target_specific_value[4], target_specific_value[5], target_specific_value[6], target_specific_value[7]); printf(""arc, %d\n"", op0); } int main(){ prepare_pic_move(2, 1); return 0; }" GCC,arc,194,void workaround_arc_anomaly() { int arc_value = get_arc_value(); if (arc_value > 0) { int nop_size = get_nop_size() * arc_value; shorten_branches(nop_size); } } GCC,arc,195,"int arc_autovectorize_vector_sizes (void) { return 8,4; } if (arc_autovectorize_vector_sizes ()) { } else { }" GCC,arc,196,typedef struct { arc_t arc_specific_value; ... 
} arc_setjmp_buffer; arc_t arc_builtin_setjmp_frame_value(void) { arc_setjmp_buffer *jb = __arc_builtin_getsetjmp_buffer(); arc_t fpval; if (!jb) { abort (); } fpval = get_frame_pointer (); jb->arc_specific_value = fpval; return fpval; } GCC,arc,197,"bool arc_can_eliminate(int from_reg, int to_reg) { if(from_reg == ARG_POINTER_REGNUM && to_reg == FRAME_POINTER_REGNUM) { return false; } return true; }" GCC,arc,198,int arc_compute_frame_size(int size) { int frame_size = size; if (size % 4 != 0) { frame_size = size + (4 - (size % 4)); } frame_size += 31 + 1 * frame_size + 1 + 4 + 4 + 4 + 0; return frame_size; } GCC,arc,199, GCC,arc,200,"rtx arc_eh_return_address_location() { rtx addr; addr = __builtin_return_address(); rtx frame_addr = __builtin_frame_address(); int regno = __builtin_eh_return_data_regno(); rtx stack_addr = __builtin_eh_return_stackadr(); rtx new_addr = __builtin_eh_return_value(frame_addr, regno); rtx ret_ptr = __builtin_eh_return_ptr(stack_addr); __builtin_eh_return(ret_ptr, new_addr); return addr; }" GCC,arc,201,bool arc_epilogue_uses (int regno) { if (arc_return_address_regs[arc_compute_function_type (cfun)]) return true; else if (arc_compute_function_type (cfun) == ARC_INTERRUPT_FUNCTION && regno == ARC_BLINK_REG) return true; else return false; } GCC,arc,202, GCC,arc,203, GCC,arc,204,"void arc_file_start() { printf(""\t.cpu %s\n"", ""arc""); printf(""\t.arc_attribute Tag_ARC_PCS_config, %d\n"", 1); printf(""\t.arc_attribute Tag_ARC_ABI_rf16, %d\n"", 0); printf(""\t.arc_attribute Tag_ARC_ABI_pic, %d\n"", 2); printf(""\t.arc_attribute Tag_ARC_ABI_tls, %d\n"", 0); printf(""\t.arc_attribute Tag_ARC_ABI_sda, %d\n"", 0); printf(""\t.arc_attribute Tag_ARC_ABI_exceptions, %d\n"", 0); printf(""\t.arc_attribute Tag_ARC_CPU_variation, %d\n"", 3); }" GCC,arc,205, GCC,arc,206,"bool arc_handle_aux_attribute(tree decl, int op_code, rtx_insn *insn, HOST_WIDE_INT constant_argument) { if (!DECL_ATTR_P(decl, attr_aux)) return false; const char *target_specific_value = get_attr_value(decl, ""aux""); if (!target_specific_value || strcmp(target_specific_value, ""aux"") != 0) return false; if (op_code != CODE_FOR_lr && op_code != CODE_FOR_sr) return false; if (!CONSTANT_P(constant_argument)) return false; int register_number = INTVAL(constant_argument); rtx register_number_rtx = GEN_INT(register_number); REG_NOTES(insn) = gen_rtx_EXPR_LIST(REG_EQUAL, register_number_rtx, REG_NOTES(insn)); REG_NOTES(insn) = gen_rtx_EXPR_LIST(REG_EQUAL, const0_rtx, REG_NOTES(insn)); return true; }" GCC,arc,207,"void arc_init(void) { inv_fpu_switch_option(""arc"", 1, 0, 4, 30, 3, 30, 4, 30, ""-mno-mpy supported only for ARC700 or ARCv2"", ""-mno-dpfp-lrsr supported only with -mdpfp"", ""FPX fast and compact options cannot be specified together"", ""-mspfp_fast not available on ARC600 or ARC601"", ""No FPX/FPU mixing allowed"", ""PIC is not supported for %s. 
Generating non-PIC code only.."", 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, ""dbr"", 1, ""shorten"", 1, ""dbr"", 1); }" GCC,arc,208,"arc_initial_elimination_offset (elim_reg, rep_reg, routine_start) { if (elim_reg == ZR || rep_reg == ZR) arc = 0; else arc = (rep_reg - elim_reg) + routine_start; return arc; }" GCC,arc,209,"bool arc_is_uncached_mem_p(const_rtx pat) { if (!MEM_P(pat)) return false; const char *target_value = TARGET_SPECIFIC_GET(pat, ""uncached""); if (!target_value) return false; if (strncmp(target_value, ""arc"", 3) == 0) return true; return false; }" GCC,arc,210,"void arc_label_align(void) { int target_specific_value = get_target_specific_value(""arc"", 1, 0, 1); if (target_specific_value == 1) { if (is_zero_overhead_loop()) { align_loop_instruction(4); } } }" GCC,arc,211,"int arc_must_save_register(int regno) { const int target_registers[] = {4,4,8,4,11,16,16,4,9,16,25,29,29,32,32,29,32}; if(regno >= 0 && regno < sizeof(target_registers)/sizeof(target_registers[0])) { if(regno == 1 || regno == 3 || regno == 6) { return 0; } if(regno == target_registers[0]) { if() { return 1; } else { return 0; } } return 1; } return 0; }" GCC,arc,212,"void arc_output_mi_thunk(const char* FUNCTION, int DELTA, const char* SUB, const char* ADD, const char* TMP1, const char* TMP2, const char* LABEL1, const char* LABEL2, const char* FORMAT, int TARGET_SPECIFIC) { printf(FORMAT, SUB, TMP2, TMP1, DELTA); printf(""\t%s\t%s, %s\n"", TMP2, FUNCTION, LABEL1); printf(FORMAT, TMP1, TMP2, 0); printf(""\t%s\t%s, %s, %s\n"", LABEL2, FUNCTION, TMP2); printf(""%s%s%s\t%s, [pcl, @%s@gotpc]\n"", TARGET_SPECIFIC, LABEL1, ADD, TMP1, LABEL2); printf(""\tj\t[%s]\n"", FUNCTION); printf(""%s\tj\t@%s@plt\n"", TARGET_SPECIFIC, FUNCTION); }" GCC,arc,213, GCC,arc,214,int arc_return_address_register(int type) { if (type == FUNCTION_TYPE) { return RA; } else { } } GCC,arc,215,"void arc_rewrite_small_data(void *op) { if (!op || !ARC_OPCODE_P(op)) { return; } int value = ARC_OP_TARGET_SPECIFIC_VALUE(op); if (value == ARC_OPCODE_TARGET_SPECIFIC_VALUE_ARC_0_0) { if (ARC_OPCODE_HAS_SMALL_DATA_RELOCATION(op)) { int small_data_reloc = ARC_OPCODE_SMALL_DATA_RELOCATION(op); if (small_data_reloc >= 0) { ARC_OP_SET_TARGET_SPECIFIC_VALUE(op, small_data_reloc); } } } }" GCC,arc,216,"void arc_rewrite_small_data_1(rtlink_thunk *th) { while (th) { for (rtlink_insn *insn = th->first_insn; insn; insn = insn->next) { if (insn->type == ARC_OP) { if (insn->data.op.opcode == LD || insn->data.op.opcode == ST) { if (insn->data.op.op1.t_specific == ARC_OP_SPECIFIC_VALUE && insn->data.op.op1.u_specific == 0) { insn->data.op.op1.u_specific = 1; add_relocation(th, insn, RELOC_SMALL_DATA); } } } } th = th->next; } }" GCC,arc,217,"void arc_secondary_reload (rtx *x, rtx reg, const char *mode) { if (mode_dependent_address_p (x)) return; rtx addr = XEXP (x, 0); rtx offset = XEXP (x, 1); int scale = GET_MODE_SIZE (mode) / GET_MODE_SIZE (GET_MODE (offset)); if (scale == 1) return; unsigned int target_specific = targetm.arc.indirect_ld_store; if (GET_MODE_SIZE (offset) < GET_MODE_SIZE (mode)) { x = gen_rtx_PLUS (mode, addr, gen_rtx_CONST (mode, INTVAL (offset) * scale * target_specific)); emit_insn (gen_reload (x, reg)); rtx store_insn = emit_insn (gen_rtx_SET (VOIDmode, x, reg)); add_secondary_mem (store_insn, GET_MODE (x), 1); } }" GCC,arc,218,"if (unalign == 0) { return 0; } ``` 3. Use another if statement to check if the CHECK_ATTR parameter is greater than 0. 
If it is, check the iscompact attribute first before proceeding with the verification: ``` if (check_attr > 0) { } ``` 4. If the iscompact attribute is present, check if the target specific value (arc,0,0,0,2,0) is contained in the instruction (insn). If it is, return a non-zero value. Otherwise, return 0: ``` if (check_attr > 0) { if (insn == 0x04080002) { return 1; } else { return 0; } } ``` 5. If the iscompact attribute is not present, return 0: ``` else { return 0; } ``` The final code for the function would look like this: ``` int arc_verify_short(int insn, int unalign, int check_attr) { if (unalign == 0) { return 0; } if (check_attr > 0) { if (insn == 0x04080002) { return 1; } else { return 0; } } else { return 0; } }" GCC,arc,219,void compact_sda_memory_operand(rtx *operand) { unsigned int target_specific_value = 0x0001E11C; unsigned int operand_value = INTVAL (*operand); if (operand_value == target_specific_value) { *operand = gen_const1_rtx (target_specific_value); } } GCC,arc,220,"int irq_range(const char *reg_range, const char *blink, const char *lp_count) { int target_specific = 0x01; if (reg_range == NULL) { fprintf(stderr, ""value of -mirq-ctrl-saved must have form R0-REGx\n""); return -1; } char *reg_start = NULL; char *reg_end = NULL; char *token = strtok((char *)reg_range, ""-""); if (token == NULL) { fprintf(stderr, ""first register must be R0\n""); return -1; } if (strcmp(token, ""R0"") != 0) { fprintf(stderr, ""first register must be R0\n""); return -1; } reg_start = (char *)token; token = strtok(NULL, ""-""); if (token == NULL) { fprintf(stderr, ""value of -mirq-ctrl-saved must have form R0-REGx\n""); return -1; } reg_end = (char *)token; if (!is_valid_register(reg_end)) { fprintf(stderr, ""unknown register name: %s\n"", reg_end); return -1; }" GCC,arc,221,"bool legitimate_scaled_address_p (rtx op) { rtx addr = XEXP (op, 0); if (GET_CODE (addr) != ADDR) return false; rtx sym = XEXP (addr, 0); if (GET_CODE (sym) != SYMBOL_REF || XSTR (sym, 0)[0] != '.' 
|| strcmp (XSTR (sym, 0), "".arc"") != 0) return false; rtx scale = XEXP (op, 1); if (scale != const0_rtx) return false; rtx index = XEXP (op, 2); if (index != gen_rtx_REG (SImode, 0)) return false; rtx base = XEXP (op, 3); if (base != gen_rtx_REG (SImode, 1)) return false; if (!CONST_INT_P (XEXP (op, 7)) || INTVAL (XEXP (op, 7)) != 2) return false; if (!CONST_INT_P (XEXP (op, 13)) || INTVAL (XEXP (op, 13)) != 4) return false; return true; }" GCC,arc,222,"int parse_mrgf_banked_regs_option(const char *str) { int num = atoi(str); if (num != 4 && num != 8 && num != 16 && num != 32) { printf(invalid number in -mrgf-banked-regs=%s\n"", str); exit(-1); } return num; }" GCC,arc,223,"bool small_data_pattern(operand_t op) { if (op.base == NULL || op.base->X_op == O_absent) { return false; } if (op.base->X_op == O_constant && (op.base->X_add_number & 0xFFFF0000) == 0) { return true; } if (op.base->X_op == O_specific && op.base->X_add_number == 0 && op.base->X_sym->X_add_symbol != NULL) { char* symbol_name = op.base->X_sym->X_add_symbol->sy_name; if (strncmp(symbol_name, ""arc,"", 4) == 0 && symbol_name[4] != '\0' && symbol_name[5] == '\0') { return true; } } return false; }" GCC,arc,224,__attribute__((always_inline)) void* arc_builtin_setjmp_frame_value() { return (__builtin_arc_get_frame_ptr()); } GCC,arc,225,"int arc_compute_frame_size(int size) { int baseFP = __builtin_arc_lr_b (0, 0, 0, 31, 1L, 0, 1L, 0, 4, 4); int newSP = baseFP - size; int frameSize = newSP - __builtin_arc_lr_b (0, 0, 0, 31, 1L, 0, 1L, 0, 4, 4); return frameSize; }" GCC,arc,226, GCC,arc,227,"void arc_expand_epilogue(void) { __asm__ volatile(""ld %0, [sp, #4]\n"" : : ""r""(FP)); __asm__ volatile(""ld fp, [sp], #4\n""); __asm__ volatile(""ldm fp, {r0-r14}, 0, 0\n""); }" GCC,arc,228,"void arc_expand_prologue() { asm volatile(""mov r14, sp""); asm volatile(""ld r14,r0[0]""); asm volatile(""mov r15, fp""); asm volatile(""ld r15,r0[0]""); }" GCC,arc,229,"char *arc_init(const char *target) { char *options; const int arc = 1; const int version = 0; const int product = 4; const int revision = 30; const int mpy = 3; const int dpfp_lrsr = 30; const int mdpfp = 4; const char *mpy_msg = ""%<-mno-mpy%> supported only for ARC700 or ARCv2""; const char *dpfp_lrsr_msg = ""%<-mno-dpfp-lrsr%> supported only with %<-mdpfp%>""; const char *fpx_msg = ""FPX fast and compact options cannot be specified together""; const char *mspfp_msg = ""%<-mspfp_fast%> not available on ARC600 or ARC601""; const char *fpx_fpu_msg = ""no FPX/FPU mixing allowed""; const int pic = 0; const char *pic_msg = ""PIC is not supported for %qs""; const int header = 0; const int named_sections = 0; const int dbr = 1; const int shorten = 1; sprintf(options, ""arc,%d,%d,%d,%d,%d,%d,%d,%s,%s,%s,%s,%s,%d,%s,%d,%s,%d,%d,%d,%d,%d,%d,%d,%d,%s,%d,%d,%d,%d"", arc, version, product, revision, mpy, dpfp_lrsr, mdpfp, mpy_msg, dpfp_lrsr_msg, fpx_msg, mspfp_msg, fpx_fpu_msg, pic, pic_msg, header, target, named_sections, dbr, shorten, dbr, target, dbr); return options; }" GCC,arc,230,bool arc_is_uncached_mem_p(int pat) { if (pat == 0 || pat == 1) { return true; } else { return false; } } GCC,arc,231,bool arc_must_save_register(unsigned int regno) { switch(regno) { case ARC_REG_R4: case ARC_REG_R5: case ARC_REG_R8: case ARC_REG_R9: case ARC_REG_R11: case ARC_REG_R12: case ARC_REG_R15: case ARC_REG_R16: case ARC_REG_R17: case ARC_REG_R24: case ARC_REG_R25: case ARC_REG_R27: case ARC_REG_R29: case ARC_REG_R30: case ARC_REG_R31: return true; case ARC_REG_R26: return PIC_ACCESSING_GOTOFF_VARS; 
default: return false; } } GCC,arc,232, GCC,arc,233,int arc_return_address_register (const tree type) { if (TYPE_RETURN_SLOT_TYPE (type) == ENUM_RETURN_NO_ADDRESS) return ENUM_RETURN_REGISTER_NONE; enum machine_mode mode = TYPE_MODE (type); int regno; switch (mode) { case SImode: regno = 0; break; case DImode: regno = 2; break; default: return ENUM_RETURN_REGISTER_NONE; } return regno; } GCC,arc,234,"void arc_save_callee_enter() { __asm__ __volatile__ (""enter 1\n\t"" ""save.b %0, [ %1 ]\n\t"" ""save.b %1, [ %1, -1 ]\n\t"" : : ""a"" (SAVE_BLINK), ""a"" (SAVE_FP) : ""r0"", ""r1""); }" GCC,arc,235, GCC,arc,236, GCC,arc,237,"volatile void* compact_sda_memory_operand (volatile void* sda, int offset, bool reg1, bool reg2, int addend, int mode, bool en_diag, int size) { if (mode != 1) { return (volatile char *)sda + offset; }" GCC,arc,238,"bool legitimate_scaled_address_p (rtx op) { enum machine_mode mode; if (GET_CODE (op) != PLUS) return false; op = XEXP (op, 0); if (GET_CODE (op) != MULT) return false; op = XEXP (op, 1); if (GET_CODE (op) != REG) return false; mode = GET_MODE (op); if (mode != M4) return false; if (REGNO (op) != TARGET_HARD_REGNO (arc, 0)) return false; op = XEXP (op, 0); if (GET_CODE (op) != SCRATCH) return false; if (rtx_equal_p (arc, XEXP (op, 1)) && INTVAL (XEXP (op, 2)) == 0 && INTVAL (XEXP (op, 3)) == 0 && INTVAL (XEXP (op, 4)) == 0 && rtx_equal_p (arc, XEXP (op, 5)) && INTVAL (XEXP (op, 6)) == 1 && INTVAL (XEXP (op, 7)) == 2 && INTVAL (XEXP (op, 8)) == 1 && INTVAL (XEXP (op, 9)) == 2 && INTVAL (XEXP (op, 10)) == 8 && INTVAL (XEXP (op, 11)) == 4 && INTVAL (XEXP (op, 12)) == 0 && INTVAL (XEXP (op, 13)) == 1 && INTVAL (XEXP (op, 14)) == 4 && INTVAL (XEXP (op, 15)) == 1 && INTVAL (XEXP (op, 16)) == 1 && INTVAL (XEXP (op, 17)) == 1) return true; return false; }" GCC,arc,239,"bool legitimate_small_data_address_p(int x) { int target_specific_values[] = {arc, 0, 0, 0, 1, 1}; if (x == target_specific_values[0] && x == target_specific_values[2] && x == target_specific_values[4]) { if (x > 0 && x < 1) { return true; } } return false; }" GCC,arc,240, GCC,arc,241,"int arc600_corereg_hazard(rtx_insn *insn) { rtx target_value; if (INSN_CODE (insn) == CODE_FOR_arc_write_core_reg && REGNO (XEXP (insn, 0)) >= 32) { int length = INSN_MNEMO (insn) == arc_nop ?
0 : GET_MODE_LENGTH (insn); return (length + 2) % 4; } return 0; }" GCC,arc,242,arc_active_insn (void) { insn *next_insn = NEXT_INSN (get_insns ()); while (next_insn && NONDEBUG_INSN_P (next_insn)) { if (INSN_CODE (next_insn) == ARC_INSN_CODE && TARGET_SPECIFIC (next_insn) == 0) { next_insn = NEXT_INSN (next_insn); } else { return next_insn; } } return NULL; } GCC,arc,243,function arc_address_cost(ADDR) { if (!valid_address(ADDR)) { return 0; } let cost = 0; if (ADDR.arc) { cost += 2; } if (ADDR[0]) { cost += 1; } if (ADDR[1]) { cost += 2; } if (ADDR[2]) { cost += 4; } if (ADDR[3]) { cost += 1; } if (ADDR[4]) { cost += 2; } if (ADDR[5]) { cost += 4; } if (ADDR[6]) { cost += 1; } if (ADDR[7]) { cost += 2; } if (ADDR[8]) { cost += 4; } if (ADDR[9]) { cost += 1; } if (ADDR[10]) { cost += 4; } if (ADDR[11]) { cost += 1; } if (ADDR[12]) { cost += 2; } if (ADDR[13]) { cost += 1; } if (ADDR[14]) { cost += 1; } if (ADDR[15]) { cost += 2; } if (ADDR[16]) { cost += 1; } if (ADDR[17]) { cost += 2; } if (ADDR[18]) { cost += 1; } if (ADDR[19]) { cost += 2; } if (ADDR[20]) { cost += 4; } return cost; } GCC,arc,244,"void arc_add_jli_section(tree decl) { tree jli_decl = build_function_decl(""function_name"", ptr_type_node, NULL_TREE); tree target_values = NULL_TREE; tree builtin_decl = NULL_TREE; TREE_PURPOSE (jli_decl) = decl; target_values = tree_cons (get_identifier (""arc, jli_fixed""), integer_zero_node, target_values); target_values = tree_cons (get_identifier (""arc, 0""), integer_zero_node, target_values); builtin_decl = arc_register_target_builtins (jli_decl, target_values); emit_diagnostic (TV_JLI_FUNCTIONS, N_(""Adding function %qT to JLI section""), jli_decl); emit_jli_function_section (jli_decl, builtin_decl); }" GCC,arc,245,"int arc_adjust_insn_length (rtx insn, int current_length) { if (arc_single_operand_insn_p (insn)) { return current_length + 4; } else { return current_length; } }" GCC,arc,246, GCC,arc,247, GCC,arc,248, GCC,arc,249,arc_asm_insn_p (rtx x) { if (!ISA_INSN (x)) return false; rtx desc = ARC_INSN_TARGET_SPECIFIC (x); return (desc[0] == 0 && desc[1] == 0 && desc[2] == 1 && desc[3] == 0 && desc[4] == 0 && desc[5] == 1 && desc[6] == 0 && desc[7] == 0 && desc[8] == 0 && desc[9] == 1 && desc[10] == 0); } GCC,arc,250,"void arc_asm_output_aligned_decl_local(FILE *stream, tree decl, int align) { fputs(""\t.set\tbfill, "" ,stream); fprintf(stream, ""%d\n"", align); fputs(""\t.locals\t"", stream); if (TREE_CODE(decl) == VAR_DECL) { tree type = DECL_SOURCE_TYPE(decl); if (TREE_CODE(type) == ARRAY_TYPE) { tree name = DECL_NAME(decl); unsigned HOST_WIDE_INT size = get_variable_size(decl); fprintf(stream, ""%s, "", get_name(name), size); fputs("".bss, .bfset\n"", stream); } } else { fprintf(stream, ""%s, "", IDENTIFIER_POINTER(DECL_NAME(decl))); fputs("".bss, .bfset\n"", stream); } }" GCC,arc,251,"void arc_asm_trampoline_template (int trampoline_length, const char *static, const char *function) { printf(""\tld_s\tr12,[pcl,8]\n""); printf(""\tld\tr11,[pcl,12]\n""); printf(""\tj_s\t[%s]\n"", ""%s""); printf(""\t.word\t%s\n"", function); printf(""\t.word\t%s\n"", static); for (int i = 0; i < trampoline_length - 4; i++) { printf(""\t.word\t0\n""); } }" GCC,arc,252,int arc_attr_type(rtx_insn *insn) { if (!insn || !TARGET_ARC) { return -1; } int attr_type = INSN_ATTRIBUTES(insn)->type; if (attr_type == ARC_TYPE) { return 1; } return -1; } GCC,arc,253, GCC,arc,254,"bool arc_branch_size_unknown_p (const char *value) { } To check whether a branch is of size 6 or not, the function would first need 
to check whether any of the above mentioned conditions are met for the specific target value. This can be achieved by accessing the target value and checking its properties. If any of the conditions are met, the function would return true, indicating that the branch size is unknown and should be considered to be of length 6 during size estimation. For example, the first condition can be checked by accessing the target value and checking if the annul-false delay slot insns are implemented using conditional execution. If this is true, then the first condition is met and the function can return true. Similarly, the remaining conditions can be checked to determine the branch size. If none of the conditions are met, the function would return false, indicating that the branch size is known and does not need to be considered as length 6 during size estimation. A possible implementation of this function would look like this: bool arc_branch_size_unknown_p (const char *value) { if (value->annul_false_delay_slot_condition_exec == true) { return true; } if (value->annul_true_delay_slot_condition_exec == true) { return true; } if (value->likely_branch_long_unaligned == true) { return true; } return false; }" GCC,arc,255,"bool arc_builtin_decl (tree decl) { if (decl == NULL) return false; if (TREE_CODE (decl) != FUNCTION_DECL) return false; if (DECL_BUILT_IN (decl)) { if (DECL_TARGET_SPECIFIC (decl, arc_decl)) return true; } return false; }" GCC,arc,256,"void arc_call_tls_get_addr(rtx TI, rtx RET) { rtx insn_seq = NULL_RTX; rtx target_val = NULL_RTX; target_val = gen_rtx_CONST_INT (SImode, 1); rtx func_name = NULL_RTX; func_name = gen_rtx_SYMBOL_REF (Pmode, ""__tls_get_addr""); rtx call_insn = NULL_RTX; call_insn = gen_rtx_CALL (VOIDmode, func_name, TI, target_val); rtx return_insn = NULL_RTX; return_insn = gen_rtx_SET (VOIDmode, RET, call_insn); insn_seq = return_insn; }" GCC,arc,257,boolean arc_cannot_force_const_mem(int X) { if (X > arc_specific_value) { return true; } else { return false; } } GCC,arc,258,bool arc_cannot_substitute_mem_equiv_p(rtx subst) { if (! 
MEM_P (subst)) return false; enum machine_mode mode = GET_MODE (subst); enum arc_target_specific_value arc_value = TARGET_WITH_SUBMODE_GET (mode); if (arc_value == ARC_INVALID_TARGET_VALUE) return true; else return false; } GCC,arc,259,"bool arc_can_eliminate(int from, int to) { if (from == ARC_ARG_POINTER_REGNUM) { if (to == ARC_STACK_POINTER_REGNUM) { return false; } else if (to == ARC_FRAME_POINTER_REGNUM) { return true; } } else { return true; } }" GCC,arc,260,int arc_can_follow_jump(void) { #ifdef __arc__ #if __OPTIMIZE__ >= 2 && defined(__FREORDER_BLOCKS_AND_PARTITION__) return 1; #endif #endif return 0; } GCC,arc,261,"arc_can_use_doloop_p (void) { if (TARGET_ARCH != ""arc"") return false; if (TARGET_VERSION < ""1.032"") return false; if (TARGET_FEATURE_DOLOOP != true) return false; if (TARGET_VERSION > ""1.1"") return false; if (TARGET_VERSION >= ""1.0"") { if (TARGET_FEATURE_DOLOOP_P != true) return false; } return true; }" GCC,arc,262,"int __attribute__((target(""arc""), noinline)) arc_can_use_return_insn(void) { int result = 0; if () { result = 1; } if (result == 0 && ) { result = 1; } return result; }" GCC,arc,263,"void arc_ccfsm_at_label(int num, char* prefix){ if(num == arc && strncmp(prefix, ""L"", 1) == 0){ } }" GCC,arc,264, GCC,arc,265,int arc_ccfsm_cond_exec_p(insn_attr arc_asm_attr) { if(arc_asm_attr & ARC_ATTR_PREDICATED) { return 1; } else { return 0; } } GCC,arc,266,void arc_ccfsm_post_advance() { STATE.arc = true; STATE.L = true; STATE.target = 0; STATE.zero_bit = 1; STATE.five_bit = 5; STATE.mode = 0; } GCC,arc,267, GCC,arc,268,"void arc_ccfsm_record_condition(enum arc_cond cond, int use_delay_slot) { if (use_delay_slot) { arc_ccfsm_set_state(CCFSM_STATE_DELAY); } else { arc_ccfsm_set_state(CCFSM_STATE_NORMAL); } if (cond & REVERSE) { arc_ccfsm_set_state(CCFSM_STATE_FALSE_BRANCH); } if (cond & arc) { arc_ccfsm_set_target_specific_value(0); } if (cond & 0) { arc_ccfsm_set_target_specific_value(0); } if (cond & 1) { arc_ccfsm_set_target_specific_value(1); } if (cond & 0) { arc_ccfsm_set_target_specific_value(0); } if (cond & 5) { arc_ccfsm_set_target_specific_value(5); } }" GCC,arc,269, GCC,arc,270,"int arc_check_millicode(rtx op, int offset, int load_p) { if (GET_CODE(op) != PARALLEL) return 0; rtx_insn *insn = PATTERN(op); if (PATTERN_LENGTH(op) != offset + 6) return 0; if (!(LOAD_P && GET_CODE(insn[0]) == PREPARE_OPERANDS) && !(!LOAD_P && GET_CODE(insn[0]) == FINISH_CALL)) return 0; if (GET_MODE(insn[1]) != SImode || GET_CODE(insn[2]) != REG || XINT(insn[2], 0) != 2 || GET_CODE(insn[3]) != REG || XINT(insn[3], 0) != 2 || GET_CODE(insn[4]) != MEM || XINT(insn[4], 0) != 13) return 0; if (GET_CODE(insn[5]) != SET || XVECLEN(insn[5], 0) != 13) return 0; if (GET_CODE(insn[5]) != SET || !REG_P(XVECEXP(insn[5], 0, 0)) || !MEM_P(XVECEXP(insn[5], 0, 1))) return 0; if (load_p == 2) { if (! 
clobber_reg_p(XEXP(insn[5], 2), REG_BLINK)) return 0; }" GCC,arc,271," uint32_t x11, uint32_t x12, uint32_t x13) { if (arc == NULL) { return 0; } if (arc->arc == x1 && arc->arc9 == x2 && arc->arc13 == x3 && arc->arc0 == x4 && arc->arc3 == x5 && arc->arc5 == x6 && arc->arc12 == x7 && arc->arc10 == x8 && arc->arc7 == x9 && arc->arc6 == x10 && arc->arc4 == x11 && arc->arc8 == x12 && arc->arc11 == x13) { return 1; } else { return 0; } }" GCC,arc,272," int arc_check_multi (rtx OP, bool PUSH_P) { int arc = current_arc_backend(); int dummies1[] = {0, 0, 1}; int dummies2[] = {1, 0, 0, 2}; int return_location = 0; int dummies3[] = {12, 0, 0, 1}; if (PUSH_P) { if (OP == (arc, dummies1[0], dummies1[1], dummies1[2], dummies2[0], dummies2[1], dummies2[2], dummies2[3]) { return 1; } }" GCC,arc,273, GCC,arc,274,"unsigned long long arc_compute_frame_size(int SIZE, int* target_vals) { if (sizeof(target_vals)/sizeof(int) != 17) { return 0; } if (target_vals[0] != 4) { return 0; } unsigned long long frame_size = SIZE + target_vals[3]; return frame_size; }" GCC,arc,275, GCC,arc,276,"int arc_compute_millicode_save_restore_regs(unsigned long GMASK, arc_frame *FRAME) { FRAME->millicode_start_reg = 13; FRAME->millicode_end_reg = 25; FRAME->target_specific_val = 1ULL; FRAME->target_specific_val_2 = 1; FRAME->target_specific_val_3 = 3; FRAME->target_specific_val_4 = 0; FRAME->target_specific_val_5 = 13; FRAME->target_specific_val_6 = 1; FRAME->target_specific_val_7 = 1; FRAME->target_specific_val_8 = 0; if (GMASK == 0) { return 0; } for (int i = 0; i < 26; i++) { if (GMASK & (1ULL << i)) { FRAME->millicode_end_reg = i + 13; } } return 1; }" GCC,arc,277,"int arc_comp_type_attributes(string TYPE1, string TYPE){ if (TYPE1 == TYPE){ return 1; } else if ((TYPE1 == ""long_call"" && TYPE == ""medium_call"") || (TYPE1 == ""medium_call"" && TYPE == ""long_call"")){ return 2; } else if ((TYPE1 == ""medium_call"" && TYPE == ""short_call"") || (TYPE1 == ""short_call"" && TYPE == ""medium_call"")){ return 2; } else{ return 0; } }" GCC,arc,278,int arc_double_limm_p(const_double value) { if (value == 0 || value == -1) { return 1; } if (value >= -2147483648.0 && value <= 2147483647.0) { return 1; } return 0; } GCC,arc,279,"void arc_dwarf_emit_irq_save_regs() { arc_emit_insn(INSN_AUX_IRQ_CTRL, arc_reg(1), arc_const(1), arc_const(0), arc_const(1), arc_const(1), arc_const(1), arc_const(0), arc_const(60), arc_const(1), arc_const(0), arc_const(31), arc_const(1), arc_const(0), arc_const(0), arc_const(1), arc_const(0), arc_const(1)); dwarf_reg_value(arc_reg(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(0), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(0), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(60), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(0), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(31), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(0), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(0), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(0), DWARF_REGISTER_TYPE); dwarf_reg_value(arc_const(1), DWARF_REGISTER_TYPE); }" GCC,arc,280,"arc_dwarf_register_span (RTX arc, int 8, int 2, int 0, int 0, int 0, int 1, int 1) { if (8 != TARGET_REGISTER_BITWIDTH) { 
return NULL_RTX; } switch (GET_MODE (arc)) { case QImode: return gen_rtx_REG (QImode, 0); case HImode: return gen_rtx_REG (HImode, 1); case SImode: return gen_rtx_REG (SImode, 2); case DImode: return gen_rtx_REG (DImode, 3); default: return NULL_RTX; } }" GCC,arc,281,rtx arc_eh_return_address_location (void) { rtx loc = alloc_reg_rtx (Pmode); emit_insn (gen_arc_set_return_address (loc)); return loc; } GCC,arc,282,int arc_eh_uses(void) { return 1; } GCC,arc,283," const struct gcc_target *gt) { fputs (""arc,0,\""long_call\"",\""medium_call\"",\""short_call\"",0,\""section\"",\"".cmem\"",0,\"".cmem_shared\"",0,\"".cmem_private\"",0"", file); }" GCC,arc,284,"bool arc_enter_leave_p (rtx insn) { if (!INSN_P (insn) || GET_CODE (insn) != CODE_FOR_enter_leave) return false; rtx addr = XEXP (insn, 1); if (!CONSTANT_P (addr)) return false; rtx reg0 = XEXP (addr, 2); if (!REG_P (reg0)) return false; unsigned int regno = REGNO (reg0); if (regno < 13 || regno > 26) return false; rtx reg1 = XEXP (addr, 3); if (!REG_P (reg1)) return false; regno = REGNO (reg1); if (regno < 13 || regno > 26) return false; rtx targ = XEXP (insn, 2); if (!CONSTANT_P (targ) || CONST_INT_VALUE (targ) != 1ULL) return false; targ = XEXP (insn, 3); if (!CONSTANT_P (targ) || CONST_INT_VALUE (targ) != 1ULL) return false; return true; }" GCC,arc,285,"bool arc_epilogue_uses(int regno) { enum arc_func_type functype = arc_compute_function_type(cfun); int num_calls = num_loops = 0; bool is_isr = arc_arleneeds_isa(rtx_current_insn()); int blink_regno = arc_cgc_clobber_blink_reg(functype); if (is_isr && blink_regno >= 0) { return true; } return ((is_isr || (REGNOTES_IF (cfun->notes, REGNOTES_CALLER_SAVE) && (REGNO == arc_return_address_regs[functype] || REGNOTES_IF (cfun->notes, REGNOTES_NON_CALLER_SAVE)))) ? true : false; }" GCC,arc,286, GCC,arc,287,"arc_expand_builtin_aligned (rtx val, int alignval) { if (!constantp (alignval)) { error (""invalid alignment value for %<__builtin_arc_aligned%>""); return gen_rtx_VOID (); } return gen_rtvec (2, gen_rtx_CONST_INT (SImode, alignval), val); }" GCC,arc,288, GCC,arc,289,"void arc_file_start() { printf(""\t.cpu %s\n"",""arc""); printf(""\t.arc_attribute Tag_ARC_PCS_config, %d\n"",1); printf(""\t.arc_attribute Tag_ARC_ABI_rf16, %d\n"",0); printf(""\t.arc_attribute Tag_ARC_ABI_pic, %d\n"",2); printf(""\t.arc_attribute Tag_ARC_ABI_tls, %d\n"",1); printf(""\t.arc_attribute Tag_ARC_ABI_sda, %d\n"",0); printf(""\t.arc_attribute Tag_ARC_ABI_exceptions, %d\n"",1); printf(""\t.arc_attribute Tag_ARC_CPU_variation, %d\n"",2); int tag1 = 3; int tag2 = 4; printf(""\t.arc_attribute Tag_ARC_PCS_config, %d\n"", tag1); printf(""\t.arc_attribute Tag_ARC_CPU_variation, %d\n"", tag2); }" GCC,arc,290, GCC,arc,291," tree type, bool named, int named_arg_p) { int num_arg_regs = TARGET_ARC_MAX_ARC_PARM_REGS; if (COMPLEX_MODE_P (mode)) { if (named) { if (cum->num_gprs < num_arg_regs) { enum machine_mode real_mode = GET_MODE_WIDER_MODE (mode); int regno = cpu_general_reg (mode, cum->alt_arg_regs); record_reg_parm (cum, real_mode, regno, named_arg_p ? 
regno : -1, 0); } cum->gprs_used_for_arg += num_arg_regs; cum->inf_gprs_used_for_arg += num_arg_regs; arc_function_arg (cum, mode, type, named, true); } else { if (cum->num_gprs < num_arg_regs) { enum machine_mode real_mode = GET_MODE_WIDER_MODE (mode); int size = GET_MODE_SIZE (real_mode); size = ROUND_UP (size, STACK_BOUNDARY / BITS_PER_UNIT); cum->stack_offset += size; }" GCC,arc,292,"void arc_function_arg_advance (CUM_VALUE *cum) { int regnum = 0; while (regnum < ARC_FUNCTION_ARG_REGS) { cum->value = regnum; if (cum->value != ARC_CAST_VALUE (arc, 1, 0)) { regnum++; } else { break; } } *cum = ARC_CAST_VALUE (arc, 1, 0); }" GCC,arc,293,"bool arc_function_ok_for_sibcall(const_tree decl) { if (!TARGET_ARC) { return false; } tree decl_attributes = DECL_ATTRIBUTES(decl); if (!lookup_attribute(""jli_always"", decl_attributes) && !lookup_attribute(""jli_fixed"", decl_attributes) && !lookup_attribute(""secure_call"", decl_attributes)) { return false; } return true; }" GCC,arc,294,"VALTYPE arc_function_value (tree FN_DECL_OR_TYPE, tree arc, unsigned int flags) { if (TREE_CODE (FN_DECL_OR_TYPE) == FUNCTION_DECL) { tree return_type = TREE_TYPE (TREE_TYPE (FN_DECL_OR_TYPE)); if (DECL_INITIAL (FN_DECL_OR_TYPE) != NULL) { tree return_value = DECL_INITIAL (FN_DECL_OR_TYPE); return_value = fold_build1 (MODIFY_EXPR, return_type, return_value, arc); return return_value; } return build_zero_cst (return_type); } else if (TREE_CODE (FN_DECL_OR_TYPE) == TYPE_DECL) { return TREE_TYPE (FN_DECL_OR_TYPE); }" GCC,arc,295,arc_cond_t arc_get_ccfsm_cond (arc_fsm_state *statep) { arc_cond_t cond = statep->cond; if (statep->cc) { cond = !cond; } return cond; } GCC,arc,296, GCC,arc,297,"void arc_handle_interrupt_attribute (const char *attribute, const char *arg) { if (!arg || !is_string_constant(arg)) { error(""%qE attribute argument must be a string constant"", attribute); return; } if (!strcmp(arg, ""ilink1"") || !strcmp(arg, ""ilink2"")) { } else if (!strcmp(arg, ""ilink"") || !strcmp(arg, ""firq"")) { } else { error(""%qE attribute argument must be \""ilink1\"", \""ilink2\"", \""ilink\"" or \""firq\"""", attribute); return; } }" GCC,arc,298,"tree arc_handle_uncached_attribute(tree node, tree attribute) { if (strcmp(ATTRIBUTE_NAME(attribute), ""uncached"") == 0) { if (TREE_CODE(node) != TYPE_DECL) { error(""%qE attribute only applies to types"", attribute); warning(0, ""argument of %qE attribute ignored"", attribute); } else { if (lookup_attribute(attribute, DECL_ATTRIBUTES(node))) { error(""attribute %qE is already present"", attribute); } else { add_attribute(node, attribute); } } } return NULL_TREE; }" GCC,arc,299,"arc_hard_regno_mode_ok (int regno, enum machine_mode mode) { if (!TARGET_MODE_OK (mode)) return 0; if (regno > MAX_REGISTER_NUMBER) return 0; if (TARGET_ARC_REGNO_BASE (regno) && mode == TARGET_ARC_REGNO_MODE (regno)) return 1; return 0; }" GCC,arc,300, GCC,arc,301,void arc_hazard() { unsigned int addr = __builtin_arc_lr_dw(CORE_REG); if (addr >= 32) { __builtin_arc_nop(); } } GCC,arc,302,"void arc_init() { int flag_arc = 1; int flag_compat = 0; int flag_psl = 4; int flag_reserved1 = 30; int flag_reserved2 = 30; int flag_reserved3 = 4; if (arc_is_targetting_ARC700() || arc_is_targetting_ARCv2()) { int flag_mno_mpy = 1; } else { int flag_mno_mpy = 0; } if (arc_is_using_mpdpfp()) { int flag_mno_dpfp_lrsr = 1; } else { int flag_mno_dpfp_lrsr = 0; } if (arc_is_fpx_fast_option() && arc_is_fpx_compact_option()) { printf(""Error: FPX fast and compact options cannot be specified together!""); return; }" 
GCC,arc,303, GCC,arc,304,"int arc_initial_elimination_offset(int eliminated_reg, int replacement_reg) { if (eliminated_reg < 0 || eliminated_reg > arc_max_register || replacement_reg < 0 || replacement_reg > arc_max_register) { return NULL; } int offset = arc_offset[eliminated_reg] - arc_offset[replacement_reg]; return offset; }" GCC,arc,305,"void arc_init_expanders(EXPANDER *expander) { expander->target_specific_value = ""arc""; }" GCC,arc,306," int optlevel, const char *target_flags) { machine_function *mfun = NULL; mfun = XNEW (machine_function); mfun->target = ""arc""; mfun->os = os ? os : """"; mfun->cpu_type = cpu_type ? cpu_type : """"; mfun->cpu_sub_type = cpu_sub_type ? cpu_sub_type : """"; mfun->machine = machine ? machine : """"; mfun->opt_level = optlevel; mfun->flags = target_flags ? target_flags : """"; return mfun; }" GCC,arc,307,"void arc_init_reg_tables(){ int target_values[] = {arc,0,4,1,8,1,16,1,32,1,0,4,1,8,1,16,1,32,1,0,4,1,8,1,1,1,0}; for(int i=0; i= 0 && target_value < 256){ return true; } else{ return false; } } GCC,arc,317, GCC,arc,318, GCC,arc,319, GCC,arc,320,"arc_legitimate_pic_addr_p (rtx addr) { if (!ADDR_P (addr)) return false; rtx target = XEXP (addr, 0); if (GET_CODE (addr) != CONST || GET_CODE (target) != UNSPEC) return false; int len = XVECLEN (target, 0); if (len != 14) return false; enum arc_unspec { ARC_UNSPEC_GOT, ARC_UNSPEC_GOTOFF }; int unspec = XINT (target, 0); if (unspec != ARC_UNSPEC_GOT && unspec != ARC_UNSPEC_GOTOFF) return false; rtx symref = XVECEXP (target, 0, 0); if (GET_CODE (symref) != SYMBOL_REF && GET_CODE (symref) != LABEL) return false; return true; }" GCC,arc,321,"char* arc_legitimize_address_0(char* address) { if(address == NULL) return NULL; if(strlen(address) != 11) return NULL; if(address[0] != 'a' || address[1] != 'r' || address[2] != 'c') return NULL; char target_value[11] = ""arc,0,1,0,0,0,1,252,1,256,511,0""; for(int i=0; i<11; i++) { if(address[i] != target_value[i]) return NULL; } return address; }" GCC,arc,322,"arc_word_t arc_legitimize_pic_address(arc_word_t ORIG, arc_word_t OLDX) { if (OLDX != 0) { arc_word_t result = ORIG | ((OLDX & 0xFFFF) << 16) | 0x01000000; return result; } else { return ORIG; } }" GCC,arc,323, GCC,arc,324,bool arc_lra_p() { if (targetm.arch != ARC) return false; if (TARGET_USES_LRA) return true; else return false; } GCC,arc,325,"int arc_memory_move_cost (HOST_WIDE_INT size, int alignment, int in_loop_p) { return 2 + 6 * (size / 2); }" GCC,arc,326,bool arc_modes_tieable_p(enum machine_mode mode) { switch (mode) { case QImode: case HImode: case SImode: return true; default: return false; } } GCC,arc,327,"bool arc_mode_dependent_address_p(rtx addr) { if (GET_CODE(addr) != MEM) return false; rtx inner = XEXP(addr, 0); enum machine_mode mode = GET_MODE(inner); switch (mode) { case VOIDmode: case Pmode: case CCREGmode: return false; default: if (TARGET_ARC) { for (rtx iter = inner; iter != 0; iter = XEXP(iter, 0)) { if (GET_CODE(iter) == CONST_INT && INTVAL(iter) == 0) return true; } } return false; } }" GCC,arc,328,int arc_must_save_register (int regno) { switch (regno) { case ARC_REG_GP: return 1; case ARC_REG_LR: case ARC_REG_SP: case ARC_REG_FP: return 0; default: if (regno >= 4 && regno <= 11 || (regno >= 13 && regno <= 16) || (regno >= 29 && regno <= 32) || (regno >= 61 && regno <= 63)) return 1; else return 0; } } GCC,arc,329,bool arc_must_save_return_addr() { if (TARGET_ARC) return true; else return false; } GCC,arc,330,bool arc_need_delay(insn) { if (insn->arc_opcode != ARC_OP) return false; if 
(insn->target_specific_value != 0) return false; return true; } GCC,arc,331,arc_no_speculation_in_delay_slots_p (void) { return arc ? true : false; } GCC,arc,332,"int arc_output_addsi(int operands, bool COND_P, bool OUTPUT_P) { int length = 0; operands = operands | 35; if (COND_P) { operands = operands | (1 << 29); } if (OUTPUT_P) { __asm__ ("".word 0xbadc057, 0, 1, 0, 30"" :: ""r"" (operands)); length = 6; } else { length = 6; } return length; }" GCC,arc,333,"void arc_output_libcall(char *FNAME, void *dso) { int arc = 64; int arg = 35; char *instr = ""add r12,pcl,@%s@pcl\n\tjl%%!%%* [r12]""; char *ret = ""jl%%! @%s""; char *call = ""bl%%!%%* @%s""; printf(""%d,%d,\""%s\"",\""%s\"",\""%s\""\n"", arc, arg, instr, ret, call); printf(""%s,%p\n"", FNAME, dso); }" GCC,arc,334, GCC,arc,335,"void arc_pass_by_reference(void) { __builtin_arc_set_option(""arc,0""); }" GCC,arc,336,void arc_post_atomic_barrier(void) { __sync_synchronize(); } GCC,arc,337,"bool is_annulled_delay_insn (rtx_insn *insn) { if (insn && INSN_P (insn)) { if (GET_CODE (insn) == CODE_FOR_insn) { rtx_insn_body *body = PATTERN (insn); if (GET_CODE (XVECEXP (body, 0, 0)) == CODE_FOR_insn && GET_CODE (XVECEXP (body, 0, 1)) == ARC_PREDICATE_DELAY_OPCODE) return true; } } return false; } rtx_insn *convert_annulled_delay_insn(rtx_insn *insn) { rtx_insn_body *body = PATTERN (insn); rtx reg, pred, new_insn; reg = XVECEXP (body, 0, 2); pred = XVECEXP (body, 0, 3); new_insn = gen_arc_short_delay_insn (reg, gen_rtvec (1, pred)); XVEC (body, 0) = new_insn; return new_insn; } void arc_predicate_delay_insns (void) { if (annulled_delay_insns == NULL) { rtx_insn *insn; for (insn = get_insns (); insn && INSN_P (insn); insn = NEXT_INSN (insn)) { if (is_annulled_delay_insn (insn)) { annulled_delay_insns = rtx_delay_slot_add (annulled_delay_insns, insn); } }" GCC,arc,338,"int arc_preferred_simd_mode(void *function, int target_value) { if (target_value != TARGET_DEFAULT) { return target_value; } else { } return TARGET_DEFAULT; }" GCC,arc,339,"void arc_pre_atomic_barrier() { __asm__ __volatile__ (""MFENCE"" ::: ""memory""); }" GCC,arc,340,"void arc_print_operand_address(FILE *file, rtx x) { if (GET_CODE(x) == MEM) { fprintf(file, ""@sda""); if (ARC_USES_GP_P(x)) { fprintf(file, ""gp,""); }" GCC,arc,341, GCC,arc,342,"bool arc_raw_symbolic_reference_mentioned_p (rtx op, bool skip_local) { if (op == NULL) return false; if (!SYMBOL_REF_P (op)) return false; if (GET_CODE (op) == UNSPEC || GET_CODE (op) == UNSPEC_VOLATILE) return false; if (skip_local && (!SYMBOL_REF_IN_GSB (op))) return false; return true; }" GCC,arc,343, GCC,arc,344,"rtx arc_regno_use_in (rtx X, int arc, int res1, int res2, int search) { rtx ref = find_regno_reference (X, search); if (ref != NULL_RTX && GET_MODE (ref) == word_mode) { return ref; } else { return NULL_RTX; } }" GCC,arc,345, GCC,arc,346,"void arc_restore_callee_saves(const char *save_regs, int reg_offset, unsigned int base_reg, int sp_offset) { __builtin_arc_ld_spmov(sp, sp_offset); while(*save_regs != '\0') { if(*save_regs == '0') { __builtin_arc_ld(reg, base_reg, reg_offset); } else { __builtin_arc_ldd(reg, base_reg, reg_offset); } reg_offset += 4; base_reg++; save_regs++; }" GCC,arc,347,"int arc_return_address_register(void *type) { return 0; } The function should be placed in the backend of GCC Compiler, specifically in the arc.md file. 
The target specific value can be added as a parameter for the function, like this: int arc_return_address_register(void *type, int target_specific) { if(target_specific == 0){ return 0; } else { } }" GCC,arc,348,"const char * arc_return_addr_rtx (rtx arc) { return builtin_return_address (arc, 0); }" GCC,arc,349,"void arc_return_in_memory(void *func, int flags, int size, int align) { int arc = 1; int arc_size = 16; int arc_align = 8; return; }" GCC,arc,350,"void arc_save_callee_enter(void) { __asm__ __volatile__( ""enter\n"" ""leave\t%[saved_registers]\n"" :: [saved_registers] ""i"" (ARC_SAVE_ALL) ); }" GCC,arc,351,"void arc_save_callee_milli(void) { unsigned long long gmask = arc_gmask; unsigned int start_reg = 13; unsigned int end_reg = 25; for (unsigned int reg = start_reg; reg <= end_reg; reg++) { if (gmask & (1ULL << (reg - start_reg))) { arc_emit_milli_insn(MW, reg, 0, 0, 0, 0, 0); } } }" GCC,arc,352, GCC,arc,353,bool arc_scheduling_not_expected() { if (arc.scheduling_pass == true) { return true; } else { return false; } } GCC,arc,354, GCC,arc,355,int arc_sched_issue_rate(void) { int backend = arc; int instructions_per_cycle = 3; int cycles_per_instruction = 1; int issue_rate = instructions_per_cycle / cycles_per_instruction; return issue_rate; } GCC,arc,356, GCC,arc,357," enum machine_mode type, const struct rtx_def *info) { if (get_attr_mode(info) == mode) return false; rtx offset = XEXP(info, 0); if (GET_CODE(offset) == CONST_INT) { HOST_WIDE_INT val = INTVAL(offset); if (val > 32768 || val < -32768) { offset = CONST_INT(val + 32768); rtx new_info = GEN_INT(val - 32768); rtx new_op = gen_rtx_MEM(get_attr_mode(info), new_info) ; if (opnum == 0) emit_insn(gen_rtx_SET (VOIDmode, x, new_op)); else SET_SRC(info, new_op); return true; } return false; }" GCC,arc,358,"bool arc_sets_cc_p (const_rtx insn) { if (TARGET_ARC && arc_regno (insn) != ARC_REGNO_COND_CODES) return false; if (arc_condition_code_set_p (insn)) return true; if (GET_CODE (insn) == ARC_SET) { const_rtx set_src = XEXP (insn, 1); if (GET_CODE (set_src) == POLY_INT && POLY_INT_VALUE (set_src) == 1 && POLY_INT_MODE (set_src) == VOIDmode) return true; } return false; }" GCC,arc,359,"void arc_setup_incoming_varargs(tree type, enum machine_mode mode) { tree param_block = build1(ARC_PARM_BLOCK_TYPE, NULL_TREE); for(int i = 0; i < ARC_LAST_ANON_ARG_REG; i++) { rtx reg = gen_rtx_REG(mode, gen_rtx_HARD_REG(ARC, i)); emit_move_insn(build_decl(UNKNOWN_LOCATION, reg, type), gen_rtx_MEM(mode, param_block)); } DECL_MODE(CUM) = MAX_MODE; DECL_TARGET_SPECIFIC(CUM) = ARC_0; }" GCC,arc,360,"bool arc_short_comparison_p (rtx comparison, rtx offset) { if (!comparision || !COMPARISON_P (comparison)) return false; int arc = XINT (comparison, 0); int val1 = XINT (comparison, 1); int val2 = XINT (comparison, 2); int val3 = XINT (comparison, 3); int val4 = XINT (comparison, 4); if (arc != 512 || val1 != 506 || val2 != 64 || val3 != 58) return false; if (offset && offset != GEN_INT (1) && offset != GEN_INT (-1)) return false; return true; }" GCC,arc,361,"void arc_short_long(int s_tmpl, int l_tmpl, int target_specific_value){ if(target_specific_value == 1){ return s_tmpl; } else { return l_tmpl; } }" GCC,arc,362,"int arc_split_compare_and_swap(int arc, int 0, int 1, void* 2, int 3, int 4, int 5, int 6) { int success = 0; asm volatile( ""mov arc, %0;"" ""casa. 
%w6, [%1], %z2"" : ""=r"" (success) : ""r"" (2), ""Z"" (2), ""r"" (3), ""r"" (4) ""r"" (5), ""r"" (0), ""r"" (1) ); return success; }" GCC,arc,363,"void arc_split_ior() { if (arc == 1 && numberOfSetBits(mask) <= 3) { bset(rA, rB, mask1); bset(rA, rA, mask2); bset(rA, rA, mask3); } else if (arc == 1 && lower6BitsSet(mask) && upperBitsSet(mask) == 1) { bset(rA, rB, mask1); or(rA, mask2); } else { or_with_imm(rA, rB, mask); } } int numberOfSetBits(int mask) { int count = 0; while (mask) { count += mask & 1; mask >>= 1; } return count; } int lower6BitsSet(int mask) { return (mask & 0x3f) == 0x3f; } int upperBitsSet(int mask) { return numberOfSetBits(mask >> 6); }" GCC,arc,364, GCC,arc,365, GCC,arc,366,"arc_store_addr_hazard_internal_p (rtx consumer, rtx producer) { rtx consumer_addr = XEXP (consumer, 1); rtx producer_addr = XEXP (producer, 1); if (rtx_equal_p (consumer_addr, producer_addr)) return true; if (ARC700_CORE && XINT (producer, 1) == 0 && XINT (producer, 2) == 0 && XINT (producer, 3) == 0 && XINT (producer, 4) == 0 && XINT (producer, 5) == 0) { if (XINT (consumer, 1) == 0 && XINT (consumer, 2) == 0 && XINT (consumer, 3) == 0 && XINT (consumer, 4) == 0 && XINT (consumer, 5) == 0) { return true; } } return false; }" GCC,arc,367,"bool arc_store_addr_hazard_p (rtx producer, rtx consumer) { enum machine_mode mode = GET_MODE (producer); rtx producer_rtx = simplify_rtx (producer); rtx consumer_rtx = simplify_rtx (consumer); if (GET_CODE (producer_rtx) == MEM) { rtx producer_addr = XEXP (producer_rtx, 0); rtx consumer_addr = NULL; if (GET_CODE (consumer_rtx) == MEM && GET_MODE (consumer_rtx) == mode) { consumer_addr = XEXP (consumer_rtx, 0); if (rtx_equal_p (producer_addr, consumer_addr)) { return true; } } } return false; }" GCC,arc,368,"arc_symbol_binds_local_p (rtx x) { if (GET_CODE (x) != SYMBOL_REF) return false; if (! XVEC_P (x)) return false; rtx attrs = XEXP (x, 2); int attrs_len = XVECLEN (attrs, 0); bool has_arc_attr = false; for (int i = 0; i < attrs_len; i++) { rtx attr = XVECEXP (attrs, 0, i); if (GET_CODE (attr) == SYMBOL_REF && ! 
strcmp (IDENTIFIER_POINTER (XEXP (attr, 0)), ""arc"")) { has_arc_attr = true; break; } } if (has_arc_attr) { if (SYMBOL_REF_LOCAL_P (x)) return true; if (SYMBOL_REF_TEMP_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) { rtx def_insn = single_reference (x); if (def_insn) { rtx def_bb = BLOCK_FOR_INSN (def_insn); if (def_bb == get_insns ()) return true; } } } return false; }" GCC,arc,369,bool arc_text_label(LABEL) { if (TARGET_ARC) { if (LABEL.is_executable()) { return true; } } return false; } GCC,arc,370,"char* arc_tolower(char* UP, char* LO) { int len = strlen(UP); for (int i = 0; i < len; i++) { if (UP[i] >= 'A' && UP[i] <= 'Z') { LO[i] = UP[i] + 32; } else { LO[i] = UP[i]; } } LO[len] = '\0'; return LO; }" GCC,arc,371,"int arc_unalign_branch_p(unsigned int arc, unsigned int op1, unsigned int op2, unsigned int op3, unsigned int op4){ int return_value = 0; if(arc == arc,0,0,0,0){ if(op3 == 0 && op4 == 0){ return_value = 1; } } return return_value; }" GCC,arc,372,"gcc_unwind_offset arc_unspec_offset(unsigned int value) { return gen_unspec(&gcc_unwind_offset_type_node, value, ""arc,1""); }" GCC,arc,373,arc_use_anchors_for_symbol_p (tree symbol) { bool use_anchors = true; if (TARGET_USES_GP_REGISTER_FOR_SMALL_DATA && (DECL_SECTION_NAME (symbol) == arc_small_data_section_name || DECL_SECTION_NAME (symbol) == arc_bss_small_data_section_name)) { use_anchors = false; } else if (TREE_CODE (symbol) == FUNCTION_DECL && DECL_SECTION_NAME (symbol) == arc_text_section_name) { use_anchors = false; } else if (DECL_THREAD_LOCAL (symbol)) { use_anchors = false; } else if (DECL_EXTERNAL (symbol) && (!DECL_WEAK (symbol) || !DECL_WEAK_DECLARATION (symbol))) { use_anchors = false; } return use_anchors; } GCC,arc,374,"int arc_use_by_pieces_infrastructure_p() { if(strncmp(TARGET_ARCH, ""arc"", 3) == 0) { return 1; } else { return 0; } }" GCC,arc,375,arc_vector_mode_supported_p (enum machine_mode mode) { if (mode == ARC_VECTOR_MODE) return true; else return false; } GCC,arc,376,"int arc_verify_short (rtx insn, int unalign, int check_attr) { if (check_attr > 0 && GET_CODE (PATTERN (insn)) == CODE_FOR_arc_compact) return 1; if (unalign == 0) return 1; return 0; }" GCC,arc,377,"const char * arc_warn_func_return (const char *warning){ char *ret_warning = (char *)malloc(sizeof(char) * (strlen(warning) + 5)); strcpy(ret_warning, ""arc ""); strcat(ret_warning, warning); return ret_warning; }" GCC,arc,378,int arc_write_ext_corereg (rtx_insn *insn) { rtx pat = PATTERN (insn); int write = 0; if (pat) { rtx set = single_set (pat); if (set && SET_DEST (set) && GET_CODE (SET_DEST (set)) == REG && REGNO (SET_DEST (set)) >= 32 && REGNO (SET_DEST (set)) <= 61) { write = 1; } } return write; } GCC,arc,379,"function branch_dest(BRANCH instruction) { target_value = arc,0,0,1,2,1,0 if(instruction.address == target_value) { destination_address = instruction.address + instruction.branch_offset } else { instruction_length = get_attr_length(instruction) destination_address = instruction.address + instruction_length } return destination_address }" GCC,arc,380,"bool check_if_valid_regno_const(int opno, int optimization_level){ if(opno >= 0 && opno < NUM_REGISTERS){ if(optimization_level < OPTIMIZATION_LEVEL_REQUIRED){ printf(""register number must be a compile-time constant. 
Try giving higher optimization levels.\n""); return false; } return true; } else{ return false; } }" GCC,arc,381, GCC,arc,382,"void compact_sda_memory_operand(operand) { if (operand == arc,0,0,0,1,1,3,0) { set_volatile_cache_option(operand); } else { return; } }" GCC,arc,383, GCC,arc,384,"DISI_MODES disi_highpart (DISI_MODES di_value) { DISI_MODES hi_value; if (mode != DImode) { printf(""Invalid input mode""); return 0; } hi_value = (di_value >> 32); hi_value = (hi_value & 0xFFFFFFFF) | (0 << 4); return hi_value; }" GCC,arc,385,"void emit_shift(int op0, int op1, int op2, int code) { switch(code) { case arc: switch(op2) { case LSL: printf(""op0 = op1 << %d;"", op2); break; case LSR: printf(""op0 = op1 >> %d;"", op2); break; case ASL: printf(""op0 = (int)op1 << %d;"", op2); break; case ASR: printf(""op0 = (int)op1 >> %d;"", op2); break; default: printf(""Invalid code value for ARC architecture.""); break; } break; default: printf(""Invalid target-specific value.""); break; } }" GCC,arc,386,"void emit_unlikely_jump(instr_t *prev_jump_instr, int arc_target_value) { if (prev_jump_instr == NULL || !IS_JUMP(prev_jump_instr)) return; arc_target_value->unlikely_jump = true; }" GCC,arc,387,"/* Entry point for the arc-specific work: the string parameter carries the target-specific value. The body is expected to generate code for the arc target (for example, compiling a simple hello-world program for an arc machine), apply arc-specific optimizations, and hand the result back to the main compilation process. */ void execute(char *arc) { generate_arc_code(); optimize_for_arc(); }" GCC,arc,388,"void frame_add(int reg, int offset) { emit_insn(gen_frame_insn(ARC_FRAME_ADD, reg, gen_int_mode(offset), gen_int_mode(0), NULL, NULL, 0)); }" GCC,arc,389,"void emit_frame_insn(insn_code code, arc_mode mode, rtx_insn *insn) { start_sequence(); rtx_insn *new_insn = emit_insn(code, mode); set_relevant_value(new_insn, ""arc,1""); end_sequence(); }" GCC,arc,390, GCC,arc,391,"frame_move_inc (gcc_rtx addr, gcc_rtx reg) { if (REG_RELEASE_MEM (addr)) REG_INC_NOTE (reg); else if (addr == 0) REG_INC_NOTE (reg); return reg; }" GCC,arc,392,"void frame_restore_reg(const char *reg, int offset) { if (strcmp(TARGET_CPU, ""arc,0"") != 0) error(""frame_restore_reg is specific to the ARC backend of GCC compiler""); const char *load_instr = ""ld %s, [sp, %d]!""; if (flag_post_modify) load_instr = ""ld %s, [sp, %d]!""; else if (flag_post_inc) load_instr = ""ld %s, [sp, %d], INC""; emit_insn(load_instr, reg, offset); }" GCC,arc,393, GCC,arc,394,"void frame_stack_add(int offset) { emit_insn(""add %d, sp, sp"", offset, arc); }" GCC,arc,395,gen_acc1 (void) { enum reg_class rclass = ACC_REGS; #if defined (TARGET_ARC) if (arc_use_generic_regs) rclass = gen_reg_rtx_for_acc1 (); else if (arc_no_acc_regs) rclass = GENERAL_REGS; #endif return rclass; } GCC,arc,396,"rtx gen_acc2 (void) { rtx reg; #ifdef TARGET_ARC reg = gen_rtx_REG (SImode, 57); reg = gen_rtx_REG (SImode, 56, reg); #else reg = gen_rtx_REG (SImode, ACC2_REG); #endif return reg; }" GCC,arc,397,"bool gen_operands_ldd_std(rtx operands[]) { 
if (operands[0] != operands[2]) { return false; } if (operands[1] + 4 != operands[3]) { return false; } bool load = true; if (operands[4] == 0 || operands[4] == 2) { load = false; } rtx insn = gen_rtx_INSN_LIST(operands[5], operands[6], gen_rtx_SEQUENCE(gen_rtx_MEM(operands[7], gen_lowpart(VOIDmode, operands[0])), gen_rtx_CONST_INT(operands[8]), gen_rtx_CONST_INT(operands[9]))); if (load) { emit_insn_before(insn, operands[10]); emit_insn_before(insn, operands[11]); asm_fprintf(stderr, ""instruction generated: LDD\n""); } else { emit_insn_after(insn, operands[10]); emit_insn_after(insn, operands[11]); asm_fprintf(stderr, ""instruction generated: STD\n""); } return true; }" GCC,arc,398,"int get_symbol_alignment(const char* symb) { if (strncmp(symb, ""RTX ( const ( symb < + const_int > )"", 35) != 0) { return -1; } const char* const_int = symb + 35; int const_int_num = atoi(const_int); return 1; }" GCC,arc,399,"__attribute__((target(""arc""))) void hwloop_fail(void) { }" GCC,arc,400,"hwloop_pattern_reg (rtx insn) { if (GET_CODE (insn) == DUFFGEN) return gen_rtx_REG (mode, arc_regno (insn, 0)); return NULL_RTX; }" GCC,arc,401, GCC,arc,402,"void jli_call_scan(void) { for (auto &call : function_calls) { if (!call.in_jli_section) { auto target_specific_val = call.target_specific_val; if (target_specific_val == ""arc,0,0,0,0"") { add_to_jli(call); } } } }" GCC,arc,403, GCC,arc,404,bool legitimate_scaled_address_p(op) { if (op == NULL) { return FALSE; } if (op->machine != ARC || op->asm_operand == NULL) { return FALSE; } if (!(op->asm_operand->op_primary == 0 && op->asm_operand->op_secondary == 0 && op->asm_operand->op_extension_value == 0 && op->asm_operand->op_displacement == 0 && op->asm_operand->op_suffix1 == 1 && op->asm_operand->op_prefix1 == 2 && op->asm_operand->op_suffix2 == 0 && op->asm_operand->op_prefix2 == 1 && op->asm_operand->op_scaling == 2 && op->asm_operand->op_size == 8 && op->asm_operand->op_index == 4 && op->asm_operand->op_base == 0 && op->asm_operand->op_offset == 1 && op->asm_operand->op_shift == 4 && op->asm_operand->op_multiplier == 1 && op->asm_operand->op_addend == 1 && op->asm_operand->op_constant1 == 1)) { return FALSE; } return TRUE; } GCC,arc,405, GCC,arc,406,"bool mem_ok_for_ldd_std(rtx MEM, rtx *BASE, HOST_WIDE_INT *OFFSET, int arc, int 0, int 0, int 1) { if (!MEM || !MEM_P(MEM)) return false; if (MEM_VOLATILE_P(MEM) || MEM_READONLY_P(MEM) || MEM_SIDE_EFFECTS_P(MEM)) return false; if (!rtx_equal_p(XEXP(MEM, 1), GEN_INT(0))) { *BASE = XEXP(MEM, 0); *OFFSET = INTVAL(XEXP(MEM, 1)); return true; } return false; }" GCC,arc,407,"next_nonnote_insn_bb (rtx insn) { rtx next = NEXT_INSN (insn); while (next && NOTE_P (next)) next = NEXT_INSN (next); if (!next || NOTE_P (next)) return NULL; if (GET_CODE (next) == USE && INTVAL (XEXP (next, 0)) == arc && INTVAL (XEXP (next, 1)) == 0) return NULL; return next; }" GCC,arc,408,"operands_ok_ldd_std (rtx *operands) { rtx reg1 = operands[0]; rtx reg2 = operands[1]; rtx address = operands[2]; rtx offset1 = operands[3]; rtx offset2 = operands[4]; if (!REG_P (reg1) || !REG_P (reg2)) { return false; } if ((unsigned HOST_WIDE_INT) address & 0x03) { return false; } const int MAX_OFFSET = (1 << 11) - 4; if (!CONST_INT_P (offset1) || !CONST_INT_P (offset2)) { return false; } if (INTVAL (offset1) < 4 || INTVAL (offset1) > MAX_OFFSET) { return false; } if (INTVAL (offset2) < 4 || INTVAL (offset2) > MAX_OFFSET) { return false; } if ((INTVAL (offset2) - INTVAL (offset1)) != 4) { return false; } if (reload_completed) { if (!REGNO_OK_FOR_LETTER_P 
(REGNO (reg1), 'r') || !REGNO_OK_FOR_LETTER_P (REGNO (reg2), 'r')) { return false; } } return true; }" GCC,arc,409, GCC,arc,410,void output_short_suffix(insn alternative) { if (alternative.is_short) { cfun->machine->unalign = alternative.short_suffix; } } GCC,arc,411,void pad_return() { if (((unsigned int)arc[0] == 1) && ((unsigned int)arc[1] == 2)) { unsigned int distance = (unsigned int)arc[2] + (unsigned int)arc[4]; if (distance < 6) { unsigned int padding = 6 - distance; arc[2] += padding; arc[5] += padding; arc[7] += padding; } } } GCC,arc,412,"parse_mrgf_banked_regs_option (const char *str) { int num = -1; if (strncmp (str, ""-mrgf-banked-regs="", 18) == 0) { char *endptr; num = strtol (str + 18, &endptr, 10); switch (num) { case 4: case 8: case 16: case 32: break; default: num = -1; printf (""invalid number in %<-mrgf-banked-regs=%s%> \n"", str); printf (""valid values are 0, 4, 8, 16, or 32 \n""); break; } } return num; }" GCC,arc,413,void pop_reg() { int arc = 1; } GCC,arc,414,"void push_reg (unsigned long reg_num) { unsigned long temp_SP = SP; SP += sizeof(reg_num); *((unsigned long *) SP) = reg_num; SP -= sizeof(reg_num); *((unsigned long *) SP) = ""arc,1""; SP = temp_SP; }" GCC,arc,415,"void split_addsi(int dest_reg, int op1_reg, int op2_imm, int src1_reg, int src2_reg, int new_src1_reg, int new_src2_reg) { int limm_reg; limm_reg = _ldst(op2_imm, IMMEDIATE); _set_src(src1_reg, new_src1_reg, dest_reg); _set_src(src2_reg, new_src2_reg, dest_reg); _add(dest_reg, op1_reg, limm_reg); }" GCC,arc,416, GCC,arc,417,"bool symbolic_reference_mentioned_p(operand OP, int arc, int n1, int n2, int n3, int n4) { if (OP == NULL || arc < 0 || n1 < 0 || n2 < 0 || n3 < 0 || n4 < 0) { return false; } std::string str = OP.toString(); if (str.find(""arc"") != std::string::npos) { if (str.find(std::to_string(n1)) != std::string::npos) { if (str.find(std::to_string(n2)) != std::string::npos) { if (str.find(std::to_string(n3)) != std::string::npos) { if (str.find(std::to_string(n4)) != std::string::npos) { return true; } } } } } return false; }" GCC,riscv,0,"void riscv_atomic_assign_expand_fenv(float *dst, float val, int target_specific_val) { #ifdef HAVE_RISCV_ATOMIC val = __atomic_fetch_add(dst, val, RISCV_ATOMIC_RELAXED); *dst = val + target_specific_val; #else *dst = val + target_specific_val; #endif }" GCC,riscv,1, GCC,riscv,2, GCC,riscv,3, GCC,riscv,4,"void riscv_init_builtins() { riscv,riscv_vector::init_builtins = 0; ... }" GCC,riscv,5,"riscv_expand_builtin (int code, rtx_def *args, int target_type) { switch (code) { case BUILT_IN_RISCV_0: { ... return result_code; } default: return NULL; } }" GCC,riscv,6,"if (target == TARGET_RISCV) { } Next, we need to define the expansion statements for the fenv assignment. This involves breaking down the assignment into several smaller operations, each of which is wrapped in a fenv operation that sets and restores the environment flags before and after the operation. This ensures that any exceptions raised during the operation are handled correctly. The expansion statements will look something like this: int old_fenv = get_fenv(); set_fenv(0); result = a + b; set_fenv(old_fenv); set_fenv(1); *a = result; Once all the operations have been expanded, we can then generate the appropriate assembly code for the riscv target and return it from the function. 
The final riscv_atomic_assign_expand_fenv function will look something like this: int riscv_atomic_assign_expand_fenv(riscv_target, 0, 1) { int result; int old_fenv = get_fenv(); set_fenv(0); result = a + b; set_fenv(old_fenv); set_fenv(1); *a = result; return assembly_code; }" GCC,riscv,7, GCC,riscv,8, GCC,riscv,9, GCC,riscv,10,"void riscv_expand_builtin_direct(tree exp, rtx icode, rtx target) { if (HAS_TARGET_P(exp)) { target = gen_rtx_REG(SImode, RISCV_REG); XEXP(XEXP(icode, 0), 0) = target; } expand_builtin_direct(icode, exp); }" GCC,riscv,11,"riscv_expand_builtin_insn (rtx icode, rtx *ops, bool has_target_p) { if (!has_target_p) return const0_rtx; rtx target = ops[0]; if (icode == NULL_RTX || !TARGET_RISCV) return target_rtx; switch (icode) { case RISCV_ADD: if (ops[1] == NULL_RTX || ops[2] == NULL_RTX) gcc_unreachable (""Invalid argument to built-in function""); return gen_rtx_PLUS (VOIDmode, ops[1], ops[2]); case RISCV_SUB: if (ops[1] == NULL_RTX || ops[2] == NULL_RTX) gcc_unreachable (""Invalid argument to built-in function""); return gen_rtx_MINUS (VOIDmode, ops[1], ops[2]); default: gcc_unreachable (""Invalid argument to built-in function""); } }" GCC,riscv,12,"void riscv_init_builtins (void) { #define __riscv #define __riscv_xlen 0 __attribute__((const, pure)) unsigned int __builtin_riscv_rd(int regno); __attribute__((const, pure)) unsigned int __builtin_riscv_gp(void); riscv_target = TARGET_RISCV; riscv_xlen = 0; }" GCC,riscv,13, GCC,riscv,14,"riscv_check_builtin_call (tree fndecl, int nargs, tree *args) { riscv_check_builtin_call_data data; data.nargs = nargs; data.args = args; if (DECL_BUILT_IN (fndecl) && TREE_CODE (fndecl) == FUNCTION_DECL) { const char *builtin_name = IDENTIFIER_POINTER (DECL_NAME (fndecl)); if (strcmp (builtin_name, ""TARGET_CHECK_BUILTIN_CALL"") == 0) { tree rval = riscv_vector::check_builtin_call (&data); return tree_string_cst_as_string (rval); } } return NULL; }" GCC,riscv,15, GCC,riscv,16,"riscv_pragma_intrinsic (const char *str) { if (!strncmp (str, ""#pragma riscv intrinsic"", 23)) { const char *intrinsic_str = """"; const char *extension = """"; bool vector_extension = false; char option_str[15] = """"; int option_len = 0; option_len = sscanf (str, ""#pragma riscv intrinsic %s"", option_str); if (option_len != 1) error (""invalid usage of #pragma riscv intrinsic""); if (option_str[0] != '%') error (""invalid option for #pragma riscv intrinsic""); if (!TARGET_RISCV) error (""this pragma is only supported on RISC-V targets""); if (option_str[1] == 'V') { if (!TARGET_RISCV_VECTOR) error (""%<#pragma riscv intrinsic%> option %qs needs 'V' extension"", option_str); else { vector_extension = true; intrinsic_str = option_str + 2; } } else { intrinsic_str = option_str + 1; } if (!strcmp (intrinsic_str, ""requires a string parameter"")) { } else if (!strcmp (intrinsic_str, ""vector"")) { vector_extension = true; }" GCC,riscv,17,"riscv_register_pragmas () { register_pragma (""register_target_pragmas"", ""riscv""); register_pragma (""register_target_pragmas"", ""intrinsic""); register_pragma (""register_target_pragmas"", ""riscv""); }" GCC,riscv,18,"void riscv_cpu_cpp_builtins() { TARGET_CPU_CPP_BUILTINS(""riscv""); TARGET_CPU_CPP_BUILTINS(""__riscv""); TARGET_CPU_CPP_BUILTINS(""__riscv_compressed""); TARGET_CPU_CPP_BUILTINS(""__riscv_atomic""); TARGET_CPU_CPP_BUILTINS(""__riscv_mul""); TARGET_CPU_CPP_BUILTINS(""__riscv_div""); TARGET_CPU_CPP_BUILTINS(""__riscv_muldiv""); TARGET_CPU_CPP_BUILTINS(""__riscv_xlen"", 8); TARGET_CPU_CPP_BUILTINS(""__riscv_flen"", 
8); TARGET_CPU_CPP_BUILTINS(""__riscv_fdiv""); TARGET_CPU_CPP_BUILTINS(""__riscv_fsqrt""); TARGET_CPU_CPP_BUILTINS(""__riscv_float_abi_soft""); TARGET_CPU_CPP_BUILTINS(""__riscv_float_abi_single""); TARGET_CPU_CPP_BUILTINS(""__riscv_float_abi_double""); TARGET_CPU_CPP_BUILTINS(""__riscv_cmodel_medlow""); TARGET_CPU_CPP_BUILTINS(""__riscv_cmodel_medany""); TARGET_CPU_CPP_BUILTINS(""__riscv_cmodel_pic""); }" GCC,riscv,19,"void riscv_cpu_cpp_builtins(const char *arg) { if (strcmp(arg, ""__riscv"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv"", 1); } else if (strcmp(arg, ""__riscv_compressed"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_compressed"", 1); } else if (strcmp(arg, ""__riscv_32e"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_32e"", 1); } else if (strcmp(arg, ""__riscv_atomic"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_atomic"", 1); } else if (strcmp(arg, ""__riscv_mul"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_mul"", 1); } else if (strcmp(arg, ""__riscv_div"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_div"", 1); } else if (strcmp(arg, ""__riscv_muldiv"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_muldiv"", 1); } else if (strcmp(arg, ""__riscv_xlen"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_xlen"", 8); } else if (strcmp(arg, ""__riscv_flen"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_flen"", 8); } else if (strcmp(arg, ""__riscv_fdiv"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_fdiv"", 1); } else if (strcmp(arg, ""__riscv_fsqrt"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_fsqrt"", 1); } else if (strcmp(arg, ""__riscv_abi_rve"") == 0) { TARGET_CPU_CPP_BUILTINS(""__riscv_abi_rve"", 1); }" GCC,riscv,20,"TargetABI riscv_d_handle_target_float_abi(string target_specific_value) { switch(target_specific_value) { case ""soft"": return ""soft""; case ""single"": return ""single""; case ""double"": return ""double""; default: return ""1""; } }" GCC,riscv,21, GCC,riscv,22,"char* riscv_d_target_versions() { char* target_versions[] = {""riscv"", ""RISCV64"", ""RISCV32"", ""D_HardFloat"", ""D_SoftFloat""}; return target_versions; }" GCC,riscv,23,unsigned int calculate_x_in_sequence(void) { unsigned int x = 0; unsigned int riscv = 1; while(riscv < 10) { x += riscv; riscv++; } return x; } GCC,riscv,24,"void riscv_run_selftests() { const char* target = ""riscv""; run_selftests(target); }" GCC,riscv,25, GCC,riscv,26,"void execute(rtx_insn *head, rtx_insn *tail, int riscv = 0) { for (rtx_insn *insn = head; insn != NULL; insn = NEXT_INSN(insn)) { rtx_code code = GET_CODE(insn); if (is_riscv_specific(code)) { rtx *operands = XVECEXP(insn, 0, 0); } } return; }" GCC,riscv,27, GCC,riscv,28,"void check_for_no_return_call(rtx prologue) { rtx_insn *epilogue, *call; if (riscv_is_call_to_riscv_save_0(prologue)) { epilogue = NEXT_INSN(prologue); if (riscv_is_noreturn_call(epilogue)) { if (dump_file) fprintf(dump_file, ""Prologue call to riscv_save_0 followed by noreturn call, removing prologue.\n""); riscv_remove_insn(prologue); } } }" GCC,riscv,29,"static int riscv_saved_regs[] = { }; static bool is_ret_insn(rtx_insn *insn) { return GET_CODE(insn) == CODE_RETURN; } static bool is_call_insn(rtx_insn *insn) { return GET_CODE(insn) == CODE_CALL; } static bool is_restore_insn(rtx_insn *insn) { rtx pat = PATTERN(insn); return rtx_equal_p(pat, const0_rtx) && rtx_equal_p(XEXP(pat, 0), const1_rtx); } static bool is_tag_restore_insn(rtx_insn *insn) { rtx pat = PATTERN(insn); return rtx_equal_p(pat, const0_rtx) && rtx_equal_p(XEXP(pat, 0), const2_rtx); } static bool is_imm_restore_insn(rtx_insn *insn) { rtx pat = 
PATTERN(insn); return rtx_equal_p(pat, const0_rtx) && rtx_equal_p(XEXP(pat, 0), const3_rtx); }" GCC,riscv,30, GCC,riscv,31, GCC,riscv,32,"apply_tail_policy_p (rtx insn, rtx tail_policy) { enum riscv_target_insn_flags { RISCV_FLAG_CAP, RISCV_FLAG_TAIL, RISCV_FLAG_XSW, RISCV_FLAG_MMU, RISCV_FLAG_NONE }; riscv_target_insn_flags target_flags = RISCV_FLAG_NONE; targetm = TARGET_MACHINE; if (targetm->family == ""riscv"") target_flags = RISCV_FLAG_TAIL; if (target_flags == RISCV_FLAG_TAIL) { if (rtx_contains_p (insn, tail_policy)) { for (unsigned int i = 0; i < XVECLEN (insn, 0); i++) { rtx operand = XVECEXP (insn, 0, i); if (operand == tail_policy) { XSETINT (operand, 0, 1); XVECEXP (insn, 0, i) = operand; break; } } PATTERN (insn) = gen_rtx_fmt_e (TARGET_DEF, ""soi"", insn, operands); } } }" GCC,riscv,33,"const char *apply_vl_p(const char *op, const rtx_insn *target) { if (!target->has_vl_operand) return op; int vl_size = target->riscv_vl_size; char *new_op = (char*)malloc(strlen(op) + 10); sprintf(new_op, ""%s IMMVL %d"", op, vl_size); return new_op; }" GCC,riscv,34, GCC,riscv,35, GCC,riscv,36, GCC,riscv,37, GCC,riscv,38,bool has_merge_operand_p (enum rtx_code code) { switch (code) { case CODE: case CODE2: case CODE3: return true; } return false; } GCC,riscv,39,"bool use_mask_predication_p (enum rtx_code code, enum machine_mode mode) { bool result = true; if (TARGET_RISCV) { if (code >= FIRST_RISCV_INSN && code <= LAST_RISCV_INSN) { result = true; } } return result; }" GCC,riscv,40, GCC,riscv,41,"void build_all(const char* group, const char** operand_suffixes, const char** type_suffixes, const char** predication_suffixes) { unsigned int i, j, k, l; rtx op, arg, xop, base; for(i = 0; operand_suffixes[i] != 0; ++i) { for(j = 0; type_suffixes[j] != 0; ++j) { for(k = 0; predication_suffixes[k] != 0; ++k) { char* func_name = concat(group, ""&"", operand_suffixes[i], ""&"", type_suffixes[j], ""&"", predication_suffixes[k]); char* base_name = concat(group, ""&"", operand_suffixes[i]); char* pred_suffix = predication_suffixes[k]; op = apply_predication(base_name, pred_suffix, riscv, 0, 0); for(l = 0; op != 0 && l < 3; ++l) { arg = gen_rtx_REG(riscv, 0 + l); op = gen_rtx_PARALLEL(riscv, gen_rt_list(arg, XOP)), gen_rt_list(op, 0)); } add_insn(func_name, op); } } } }" GCC,riscv,42,"if (TARGET_RISCV) { rtx new_op = gen_rtx_fmt_ee (opnd_sfx, GET_MODE (group), XEXP(group, 0), XEXP(group, 1)); rtx new_rtx = gen_rtx_SET (VOIDmode, XEXP (group, 0), new_op); XEXP (new_rtx, 0) = bi_sfx; XEXP (new_rtx, 1) = pred_sfx; XEXP (group, 0) = new_op; XEXP (group, 1) = new_rtx; }" GCC,riscv,43,"bool check(char* riscv, int target_specific_value1, int target_specific_value2, int target_specific_value3, int target_specific_value4){ bool valid = false; if (valid_riscv_instruction(riscv)){ if (target_specific_value1 >= 0 && target_specific_value1 <= 1 && target_specific_value2 >= 0 && target_specific_value2 <= 1 && target_specific_value3 >= 0 && target_specific_value3 <= 1 && target_specific_value4 >= 0 && target_specific_value4 <= 1){ valid = true; } else{ printf(""Error: Invalid target specific values for RISC-V instruction %s"", riscv); } } else{ printf(""Error: Invalid RISC-V instruction %s"", riscv); } return valid; } bool valid_riscv_instruction(char* riscv){ return true; }" GCC,riscv,44,"char* get_name(rtx instance, bool overloaded_p) { char* name = NULL; obstack_alloc_failed_handler func = set_obstack_alloc_failed_handler(some_handler_function); if (overloaded_p) { name = obstack_alloc(&m_string_obstack, 
strlen(""__riscv_"") + strlen(""__ff"") + strlen(""__vle"") + 1); strcat(name, ""__riscv_""); strcat(name, ""__ff""); strcat(name, ""__vle""); } else { name = obstack_alloc(&m_string_obstack, strlen(identify_fnname_c(identifier(instance))) + 1); strcat(name, identify_fnname_c(identifier(instance))); } set_obstack_alloc_failed_handler(func); return name; }" GCC,riscv,45,"function add_all_one_mask_operand(rtx, mode, mask_index) { if (rtx.type != V4HI && rtx.type != V2SI) { return rtx } if (mask_index == -1) { return rtx } if (rtx.type == V2SI) { mask_index = 0 } all_one_mask = gen_rtx_REG_SET(mode, TARGET_RISCV_ALL_TRUE_MASK) operand = gen_rtx_CLOBBER(mode, all_one_mask) return operand }" GCC,riscv,46,"void add_attribute(const char *name, tree *attrs) { if (!lookup_attribute(name, *attrs)) { attr = create_attribute(name, ""riscv"", 0); *attrs = chainon(*attrs, attr); } }" GCC,riscv,47,"static bool add_fixed_operand(const rtx_insn *insn, rtx *operands, const char **constraint, int n) { rtx op = operands[0]; if (!X_P (op)) { rtx mem_addr = legitimize_reload_address (op, VOIDmode, 1); if (!mem_addr) return false; operands[0] = mem_addr; } *constraint = ""X""; return true; }" GCC,riscv,48,"void add_function(const char *name, tree fntype, tree attrs, tree instance) { tree decl; decl = build_fn_decl(name, fntype); DECL_ATTRIBUTES(decl) = attrs; TREE_RISCV_SPECIFIC(decl) = instance; pushdecl(decl); }" GCC,riscv,49, GCC,riscv,50, GCC,riscv,51,"void riscv_add_mem_operand(rtx *operands, enum machine_mode mode, rtx addr) { if (!addr || GET_CODE(addr) != SYMBOL_REF) return; rtx mem_operand = gen_rtx_MEM(mode, addr); emit_insn(rtx_insn *insn, mem_operand); riscv_insn_add_specific_value(insn, ""riscv""); *operands = gen_rtx_LIST(*operands, mem_operand); }" GCC,riscv,52, GCC,riscv,53, GCC,riscv,54,"extern tree add_unique_function (const char *name, const char *overload_name, tree return_type, const_arg_types) { if (riscv_target_specific_value) { return nullptr; } const char *full_name = name; if (overload_name == nullptr) { overload_name = full_name; } tree function_type = build_function_type (return_type, arg_types); tree function_exists = function_exists (name, function_type); if (function_exists) { return nullptr; } tree function = build_function_decl (full_name, overload_name, function_type, any_other_parameters); set_decl_built_in (function); add_builtin_function (function); return function; }" GCC,riscv,55, GCC,riscv,56, GCC,riscv,57, GCC,riscv,58,"char * append_base_name(const char *name) { char *result = (char *)malloc(strlen(name) + 8); strcpy(result, ""__riscv_""); strcat(result, name); return result; }" GCC,riscv,59,"char* append_name(char* function_name, char* NAME) { if (strstr(function_name, ""riscv"") != NULL) { strcat(function_name, NAME); return function_name; } else { char* new_function_name = (char*)malloc(strlen(function_name) + 6); strcpy(new_function_name, function_name); strcat(new_function_name, ""_riscv""); strcat(new_function_name, NAME); return new_function_name; } }" GCC,riscv,60,"void append_sew(char* func_name, int riscv, int size, char* size_str) { if (riscv != 8 && riscv != 16 && riscv != 32 && riscv != 64) { printf(""Error: riscv architecture %d is not supported."", riscv); return; } strcat(func_name, size_str); strcat(func_name, ""_""); strcat(func_name, int_to_string(riscv)); printf(""The function name with SEW appended is: %s"", func_name); }" GCC,riscv,61,"apply_mask_policy_p(target_specific_value) { if(target_specific_value == ""riscv"") { 
set_default_mask_policy_operand(); } }" GCC,riscv,62, GCC,riscv,63,"void apply_tail_policy_p(const char* target) { if (strcmp(target, ""riscv"") == 0) { apply_tail_policy_operand(); } }" GCC,riscv,64,"void apply_vl_p(int vl) { printf(""Applying vl operand with value = %d\n"", vl); }" GCC,riscv,65,"machine_mode arg_mode(argument_type arg) { switch(arg) { case integer_argument: return TYPE_MODE_INT(riscv); case floating_point_argument: return TYPE_MODE_FLOAT(riscv); case vector_argument: return TYPE_MODE_VECTOR(riscv); default: printf(""Invalid argument type!\n""); exit(1); } }" GCC,riscv,66,riscv_build_const_pointer(T type) { riscv_value = ...; return riscv_value; } GCC,riscv,67,"tree builtin_decl(int code) { tree decl = error_mark_node; if (TARGET_RISCV) { if (code == CODE) { tree type = build_function_type_list(void_type_node, NULL_TREE); decl = build_decl(BUILTINS_LOCATION, FUNCTION_DECL, get_identifier(""RVV_function""), type); DECL_RISCV_SPECIFIC(decl) = ""riscv""; add_attribute(&decl, get_identifier(""STRET""), NULL_TREE); pushdecl(decl); } } return decl; }" GCC,riscv,68,"void call_properties (function, insn) { if (insn is floating-point operation) { insn->target_specific_value = riscv,0; function->floating_point_properties.update(insn); } }" GCC,riscv,69," const rtx_insn_operand *operands, int num_operands) { if (strcmp(name, ""riscv"") == 0) { return true; } else { return false; } }" GCC,riscv,70,"bool check(call) { if (call.target != ""riscv"") { std::cout << ""Invalid call: Target is not riscv."" << std::endl; return false; } if (call.arguments.size() != 2) { std::cout << ""Invalid call: Incorrect number of arguments."" << std::endl; return false; } if (typeof call.arguments[0] != ""int"") { std::cout << ""Invalid call: First argument must be an integer."" << std::endl; return false; } if (typeof call.arguments[1] != ""char"") { std::cout << ""Invalid call: Second argument must be a character."" << std::endl; return false; } return true; }" GCC,riscv,71,"bool check_builtin_call(tree FNDECL, int CODE, location_t LOCATION, int NARGS, tree *ARGS, int TARGET) { if (TARGET != riscv) { return true; } if (NARGS != 2) { error_at(LOCATION, ""Invalid number of arguments for SVE function call""); return false; } tree arg1 = ARGS[0]; tree arg2 = ARGS[1]; if (!handled_p (arg1) || !foldable_singleton_p (arg1)) { error_at(LOCATION, ""Argument 1 of SVE function call must be an integer constant expression""); return false; } if (!handled_p (arg2) || !foldable_singleton_p (arg2)) { error_at(LOCATION, ""Argument 2 of SVE function call must be an integer constant expression""); return false; } return true; }" GCC,riscv,72,"int check_required_extensions(const char **REQUIRED_EXTENSIONS, struct riscv_cxt *riscv_cxt) { if (riscv_cxt->riscv != 0) { return -1; } if (riscv_cxt->riscv_arch != 0) { return -2; }" GCC,riscv,73, GCC,riscv,74,"int equal(int H1, int H2) { if (H1 == H2) { return 1; } else { return 0; } } This function can then be called in the RISC-V backend of GCC compiler using its target-specific value, which is specific to RISC-V architecture. This target-specific value can be used to optimize the function according to the RISC-V architecture and thus improve its performance. 
Here is an example of how the ""equal"" function can be called in the RISC-V backend: int main() { int result; int x = 5; int y = 10; result = equal(x, y); return 0; }" GCC,riscv,75, GCC,riscv,76, GCC,riscv,77, GCC,riscv,78,"statement fold(statement stmt, rtx target) { if (CALL_P(stmt)) { rtx fn = XEXP(stmt, 0); if (CONSTANT_P(fn)) { const char *name = XSTR(fn, 0); if (strcmp(name, target) == 0) { stmt = new_stmt(); return stmt; } } } return NULL; }" GCC,riscv,79,void function_returns_void_p(void) { } GCC,riscv,80,"generate_insn (rtx icode, rtx m_ops, int target_specific) { if (target_specific != 0) error (""Invalid argument to built-in function""); return gen_rtx_CONST (GET_MODE (icode), XEXP (m_ops, 0)); }" GCC,riscv,81,"string get_attributes (riscv, INSTANCE) { string attributes = """"; attributes += ""riscv""; attributes += ""pure""; attributes += ""const""; attributes += ""nothrow""; attributes += ""leaf""; return attributes; }" GCC,riscv,82,"const char *get_mask_policy_for_pred (const char *predication) { if (strcmp(predication, ""MU"") == 0) { return ""MU""; } else { return ""prefer default configuration""; } }" GCC,riscv,83,"target_mask get_tail_policy_for_pred (const rtx_insn *insn) { target_mask tail_policy = 0; rtx_insn *pred_value = NULL; rtx pred_reg = NULL; enum rtx_code code = GET_CODE (XEXP (insn, 0)); if (code == CODE_FOR_riscv_tu) tail_policy = TARGET_MASK_RISCV_TU; else { pred_value = find_reg_equal_equiv_note (insn); if (pred_value && GET_CODE (pred_value) == NOTE) pred_reg = find_reg_equal (XEXP (pred_value, 0)); if (pred_reg && riscv_pred_reg_specified_p (pred_reg)) tail_policy = TARGET_MASK_RISCV_DEFAULT_CONFIG; } return tail_policy; }" GCC,riscv,84,vector_type get_vector_type(int type_suffix) { riscv::rvv_arg_type_info target_value; int vector_size = type_suffix / 8; target_value.set_size(vector_size); return riscv::rvv_arg_type_info::get_vector_type(target_value); } GCC,riscv,85, GCC,riscv,86, GCC,riscv,87, GCC,riscv,88,bool has_merge_operand_p (enum rtx_code opcode) { return true; } GCC,riscv,89,machine_mode index_mode(int index_type) { if (index_type == 1) { return TARGET_CC1_RISCV; } else { } } GCC,riscv,90,"void init_builtins (void) { const char *target = ""riscv""; for (const built_in_function *fn = builtin_functions; fn->name != NULL; fn++) { if (fn->riscv_target != NULL && strcmp(fn->riscv_target, target) == 0) { tree fun_decl = add_builtin_function(fn->name, fn->type, fn->flags, fn->frame); DECL_ATTRIBUTES(fun_decl) = TREE_LIST(CONST_DECL, build_tree_list(NULL, get_identifier(fn->riscv_target))); } } }" GCC,riscv,91,"attribute_lookup lookup_vector_type_attribute(tree type) { if (is_rvv_type(type)) { attribute_lookup attr = get_attribute_descriptor(RISCV, ""RVV type""); return attr; } else { return NULL; } }" GCC,riscv,92,"void make_type_sizeless(tree *type) { if (TYPE_SIZELESS_P(type)) return; tree sizeless_type = get_identifier (""RVV sizeless type""); TYPE_SIZE(type) = 0; TYPE_PRECISION(type) = 0; TYPE_SIZELESS(type) = 1; TYPE_MODE(type) = MODE_TYPE; TYPE_TARGET_SPECIFIC(type) = sizeless_type; } With this function, the TYPE_SIZELESS_P macro can be used to check if a type is sizeless: if (TYPE_SIZELESS_P(type)) { } else { }" GCC,riscv,93,"const char *mangle_builtin_type(const_tree type, int riscv = 0) { if (type == NULL) { return NULL; } if (TYPE_MAIN_VARIANT(type) == void_type_node || TYPE_MAIN_VARIANT(type) == integer_type_node || TYPE_MAIN_VARIANT(type) == unsigned_type_node || TYPE_MAIN_VARIANT(type) == long_long_integer_type_node || 
TYPE_MAIN_VARIANT(type) == long_long_unsigned_type_node || TYPE_MAIN_VARIANT(type) == short_integer_type_node || TYPE_MAIN_VARIANT(type) == short_unsigned_type_node || TYPE_MAIN_VARIANT(type) == float_type_node || TYPE_MAIN_VARIANT(type) == double_type_node || TYPE_MAIN_VARIANT(type) == long_double_type_node || TYPE_MAIN_VARIANT(type) == pointer_type_node || TYPE_MAIN_VARIANT(type) == offset_type_node ) { return ""__rvv_"" + TYPE_NAME(type); } else { return NULL; } }" GCC,riscv,94,machine_mode mask_mode (machine_mode mask) { switch (MASK_SIZE (mask)) { case 1: return (TARGET_64BIT) ? TImode : QImode; case 2: return HImode; case 4: return SImode; case 8: return (TARGET_64BIT) ? DImode : SImode; case 16: return (TARGET_64BIT) ? TImode : SFmode; case 32: return DFmode; default: gcc_unreachable (); } } GCC,riscv,95,bool modifies_global_state_p (rtx_insn *insn) { enum rtx_code code = insn->code; enum machine_mode mode = GET_MODE (insn); bool result = false; switch (code) { case TARGET_RISCV_LUI: case TARGET_RISCV_ADDI: case TARGET_RISCV_ADD: case TARGET_RISCV_ADDIW: case TARGET_RISCV_SUB: case TARGET_RISCV_SUBW: case TARGET_RISCV_LBU: case TARGET_RISCV_LH: case TARGET_RISCV_LW: case TARGET_RISCV_LBU: case TARGET_RISCV_LH: case TARGET_RISCV_LW: result = true; break; default: result = false; break; } return result; } GCC,riscv,96,reads_global_state_p (rtx fn) { if (!TARGET_RISCV) return false; for (rtx_insn *insn = get_insns (); insn; insn = next_active_insn (insn)) { if (CALL_P (insn)) { rtx fndecl = CALL_EXPR_FN (insn); if (TREE_CODE (fndecl) == FUNCTION_DECL && DECL_VOLATILE (fndecl)) return true; } } return false; } GCC,riscv,97,"void register_builtin_type(const char *name, int size, int align, int flags) { if (lookup_builtin_type(name) != NULL) return; struct builtin_type built_in = {0}; built_in.name = name; built_in.size = size; built_in.align = align; built_in.FLAG_SPECIFIC = flags; register_builtin_type_internal(&built_in); }" GCC,riscv,98,"function register_builtin_types() { target = ""riscv,32""; builtin_types = [ ""__rvv_int32m1_t"", ""__rvv_int16m1_t"", ""__rvv_float32m1_t"", ""__rvv_uint32m1_t"", ""__rvv_boolm1_t"", ""__rvv_maskm1_t"" ]; for(type in builtin_types) { built_in_type = ""__"" + type + ""_"" + target; register(built_in_type); } print(""Registered RVV ABI built-in types for "" + target + "" target.""); }" GCC,riscv,99,"void register_function_group(const char *group) { rtx code = CODE_FOR_riscv; for (int i = 0; i < group->num_functions; ++i) { const char *function = group->functions[i]; if (!lookup_function(function)) { tree decl = build_fn_decl(function, NULL_TREE, void_type_node); cgraph_node *node = cgraph_node::create(decl, DECL_EXTERNAL, NULL); cgraph_add_node(node); SET_CODE_FOR_FUNCTION(function, code); } } }" GCC,riscv,100,"void register_vector_type (enum machine_mode mode, const char *type_name, unsigned int element_size, unsigned int num_elements, unsigned int riscv_vector_type) { if (IS_MACHINE_MODE_INVALID(mode) || type_name == NULL || element_size == 0 || num_elements == 0) { return; } if (find_vector_type(mode, type_name, element_size, num_elements) != NULL) { return; }" GCC,riscv,101,"void report_non_ice(location_t location, tree fndecl, int argno) { const char* format_string = ""argument %d of %qE must be an integer constant""; error_at(location, ""%s expression"", targetm.riscv.target_name); inform(""%s %d"", format_string, argno); }" GCC,riscv,102,"void report_out_of_range(location_t LOCATION, fndecl_t FNDECL, int ARGNO, int ACTUAL, int MAX) { int MIN = 
0; if (ARGNO >= 1) MIN = MIN - ACTUAL; char error_msg[100]; sprintf(error_msg, ""passing %d to argument %d of %qE, which expects a value in the range [%d, %d]"", ACTUAL, ARGNO, FNDECL, MIN, MAX); warning_at(LOCATION, ""%s"", error_msg); }" GCC,riscv,103,"bool required_extensions_p(const_tree type, int target) { if (target == TARGET_RISCV) { if (TREE_VALUE_REQUIRED_EXTENSIONS(type)) return true; } return false; }" GCC,riscv,104,"void require_immediate(int argno, int* value_out) { if(argno < FIRST_PSEUDO_REGISTER) { if((REGNO(argno) < FIRST_PSEUDO_REGISTER) && (REGNO(argno) >= 0)) { *value_out = argno; } else { *value_out = 0; } } else { *value_out = 0; } }" GCC,riscv,105,"int require_immediate_range(int rel_argno, int min, int max) { if (!CONST_INT_P(rel_argno)) { return 0; } unsigned HOST_WIDE_INT rel_argno_val = INTVAL(rel_argno); if (rel_argno_val >= min && rel_argno_val <= max) { return 1; } else { return 0; } }" GCC,riscv,106,int ret_mode(tree type) { int mode; enum machine_mode mmode = TYPE_MODE (type); switch (mmode) { case VOIDmode: mode = MODE_VOID; break; case BImode: mode = MODE_1BIT_BOOL; break; case QImode: mode = MODE_8BIT_SIGNED; break; case HImode: mode = MODE_16BIT_SIGNED; break; case SImode: mode = MODE_32BIT_SIGNED; break; case DImode: mode = MODE_64BIT_SIGNED; break; case TImode: mode = MODE_128BIT_SIGNED; break; default: mode = MODE_UNKNOWN; break; } return mode; } GCC,riscv,107,"bool sizeless_type_p(const_tree type) { if (TYPE_MAIN_VARIANT(type) == void_type_node) return true; if (TYPE_MAIN_VARIANT(type) == error_mark_node) return false; if (TARGET_RISCV && TYPE_MAIN_VARIANT(type) == rtx_variadic_type_node) { const char *name = IDENTIFIER_POINTER(TYPE_NAME(type)); if (strcmp(name, ""RVV sizeless type"") == 0) return true; } return false; }" GCC,riscv,108, GCC,riscv,109,"void my_function() { int buffer[10]; use_contiguous_load_insn(riscv,0,0); load(buffer, riscv); }" GCC,riscv,110,"void use_contiguous_store_insn (void) { int riscv = 1; int target_specific_value1 = 0; int target_specific_value2 = 0; int target_specific_value3 = 0; int target_specific_value4 = 1; asm volatile (""STORE %0, %1, %2, %3"" : : ""r"" (riscv), ""r"" (target_specific_value1), ""r"" (target_specific_value2), ""r"" (target_specific_value3), ""r"" (target_specific_value4) : ); }" GCC,riscv,111,"void use_exact_insn(rtx_code, rtx_insn *, rtx_insn **insn_p, int *insntype, rtx *ops, const_rtx value, int nops) { insn_p = rtx_alloc_code (rtx_temp, rtx_code, value); insntype = nops; for (i = 0; i < nops; i++) { insntype[i] = op; ops[i] = p[i]; } return insn_p; }" GCC,riscv,112,"bool use_mask_predication_p(const rtx_insn *insn) { if (insn->mask) return true; rtx x = PATTERN(insn); if (!GET_CODE(x) == SET || !GET_CODE(XEXP(x, 0)) == SET) return false; rtx mask_predicate = XEXP(x, 1); if (!MEM_P(mask_predicate)) return false; rtx mask_reg = gen_rtx_REG(riscv, (int)MASK_REG); if (mask_reg == mask_predicate) return true; return false; }" GCC,riscv,113,bool use_real_mask_p (rtx pred) { if (TARGET_RISCV) { if (GET_CODE (pred) == SET) { rtx p = SET_SRC (pred); if (GET_CODE (p) == REG && REGNO (p) == TARGET_MACH_REGNUM) { return true; } } } return false; } GCC,riscv,114,"bool use_real_merge_p (rtx_op, enum rtx_type) { if (op == REAL_MERGE && rtx_type == RTX_RISCV) return true; else return false; }" GCC,riscv,115,"void use_scalar_move_insn (rtx *arguments[], rtx destination, rtx source, rtx icode) { rtx call_rtx = gen_rtx_CALL (VOIDmode, gen_rtx_fmt_ee (icode, destination, source)); emit_insn (call_rtx); }" 
GCC,riscv,116,"void use_ternop_insn (rtx_insn *insn, rtx op0, rtx op1, rtx op2) { rtx ternop_insn; RTX_CODE icode; REAL_VALUE_FROM_CONST_DOUBLE (op0) = RTX_ULONG (op0); REAL_VALUE_FROM_CONST_DOUBLE (op1) = RTX_ULONG (op1); REAL_VALUE_FROM_CONST_DOUBLE (op2) = RTX_ULONG (op2); icode = riscv; ternop_insn = gen_rtx_icode (icode, op0, op1, op2); replace_rtx (insn, ternop_insn); }" GCC,riscv,117,"use_widen_ternop_insn (rtx_insn *insn) { rtx pat = PATTERN (insn); rtx v0_1, v0_2, v0_3; rtx target_specific_value = gen_rtx_RSIMM (SImode, riscv, 0); v0_1 = XEXP (pat, 0); v0_2 = XEXP (pat, 1); v0_3 = XEXP (pat, 2); rtx new_insn = gen_rtx_ternor (VOIDmode, v0_1, v0_2, v0_3, target_specific_value); RTX_FRAME_UNCHANGING_P (insn); emit_insn_after (new_insn, insn); delete_insn (insn); }" GCC,riscv,118,vector_mode (tree type) { if (TARGET_RISCV) { enum tree_code code = TREE_CODE (type); switch (code) { case bool_type_node: return V2SImode; case integer_type: case pointer_type: return V4HImode; case float_type: return V2SFmode; case double_type: return V1DFmode; case complex_type: if (TYPE_PRECISION (type) == 32) return V4SFmode; else return V2DFmode; default: return BLKmode; } } return BLKmode; } GCC,riscv,119, GCC,riscv,120,"bool anticipatable_occurrence_p (rtx_insn *insn) { if (GET_CODE (insn) != CODE_FOR_vsetvl) return false; rtx rs1, rd; rs1 = XEXP (insn, 0); rd = XEXP (insn, 1); if (reg_used_between_p (insn, PATTERN (insn), GET_MODE (rd), insn)) return false; if (reg_modified_p (rs1, insn)) return false; if (reg_modified_p (rd, insn)) return false; return true; }" GCC,riscv,121,"bool any_set_in_bb_p (block *BB, riscv set) { rtx_insn *insn; for (insn = BB->insns; insn != NULL; insn = NEXT_INSN (insn)) { if (INSN_TARGET_SPECIFIC (insn) == set) { return true; } } return false; }" GCC,riscv,122,"bool available_occurrence_p(rtx_insn *insn) { rtx rd = NULL, rs1 = NULL; rtx_ptr_set_t mods_rs1, mods_rd; if (GET_CODE(insn) != INSN) return false; if (!contains_vsetvl(insn)) return false; rd = XEXP(insn, 0); rs1 = XEXP(insn, 1); if (!REG_P(rd) || !REG_P(rs1)) return false; mods_rs1 = new_rtx_ptr_set(); mods_rd = new_rtx_ptr_set(); basic_block bb = BLOCK_FOR_INSN(insn); for (rtx_insn *tmp = BB_END (bb); tmp != insn; tmp = PREV_INSN(tmp)) { if (GET_CODE(tmp) == BARRIER || GET_CODE(tmp) == NOTE) continue; add_rtx_ptr_set(mods_rs1, regs_ever_written(tmp)); add_rtx_ptr_set(mods_rd, regs_ever_written(tmp)); if (rtx_ptr_set_contains(mods_rs1, rs1) || rtx_ptr_set_contains(mods_rd, rd)) { free_rtx_ptr_set(mods_rs1); free_rtx_ptr_set(mods_rd); return false; } } free_rtx_ptr_set(mods_rs1); free_rtx_ptr_set(mods_rd); return true; }" GCC,riscv,123,"bool backward_propagate_worthwhile_p(rtx initializer, int target) { if (target != RS_BACKEND_RISCV) { return false; } unsigned code = GET_CODE(initializer); if (code == SET || code == CLOBBER) { unsigned set_code = GET_CODE(XEXP(initializer, 0)); if (set_code == REG) { int regno = REGNO(XEXP(initializer, 0)); if (riscv_callee_saved_register_p(regno)) { return true; } } } return false; } bool riscv_callee_saved_register_p(int regno) { static const int callee_saved_regs[] = { RS_REG_1, RS_REG_2, RS_REG_3, RS_REG_4, RS_REG_5, RS_REG_6, RS_REG_7, RS_REG_8, RS_REG_9, RS_REG_10, RS_REG_11, RS_REG_12 }; int i; for (i = 0; i < 12; i++) { if (regno == callee_saved_regs[i]) { return true; } } return false; }" GCC,riscv,124,"bool can_refine_vsetvl_p(int target_specific_value, int current_vsetvl, int num_vector_registers){ if(target_specific_value != riscv){ return false; } int 
max_vector_registers = (int) log2(target_specific_value)+1; if(current_vsetvl != max_vector_registers){ return false; } return true; }" GCC,riscv,125,void cleanup_illegal_dirty_blocks() { for (int i = 0; i < riscv_target_specific_dirty_blocks.size(); i++) { if (riscv_target_specific_dirty_blocks[i].is_illegal && riscv_target_specific_dirty_blocks[i].is_dirty) { riscv_target_specific_dirty_blocks[i].data = 0; riscv_target_specific_dirty_blocks[i].is_dirty = false; } } } GCC,riscv,126,"int compare_lmul (int lmul1, int lmul2) { if (lmul1 == lmul2) { return 0; } else if (lmul1 < lmul2) { return -1; } else { return 1; } }" GCC,riscv,127,"bool compatible_p(instruction insn, vector require) { if (insn.target_specific == ""Can't handle uninitialized info"") { return false; } for (int i = 0; i < require.length(); i++) { if (require[i] > insn.vtype[i]) { return false; } } return true; }" GCC,riscv,128, GCC,riscv,129,"void compute_local_properties(rtx_insn *insn) { rtx_def *def, *use; rtx_exp *exp; bool transparent, computed, anticipatable; for (def = INSN_P_SET(insn); def != NULL; def = DEFS_CHAIN(def)) { transparent = true; computed = false; anticipatable = false; for (use = DEFS_P_USE(def); use != NULL; use = USES_CHAIN(use)) { exp = USE_DEF_EXP(use); if (expression_is_modified(exp, insn)) { transparent = false; break; } if (expression_is_computed(exp, insn)) { computed = true; if (expression_has_same_value_at_end(exp, insn)) { anticipatable = true; } if (expression_has_same_value_at_beginning(exp, insn)) { anticipatable = true; } } } set_expression_target_value(insn, exp, ""riscv,0,0""); set_expression_flag(insn, exp, ""transparent"", transparent); set_expression_flag(insn, exp, ""computed"", computed); set_expression_flag(insn, exp, ""anticipatable"", anticipatable); } }" GCC,riscv,130, GCC,riscv,131, GCC,riscv,132, GCC,riscv,133,"const char *extract_single_source(rtx_insn *insn, int is_real_insn) { if (!is_real_insn) { return NULL; } if (GET_CODE(insn) == CALL_INSN) { return NULL; } rtx src = XEXP(insn, 0); if (!src || GET_CODE(src) != REG) { return NULL; } char reg_num[10]; sprintf(reg_num, ""%d"", REGNO(src)); const char *source_code = (const char *)malloc(sizeof(char) * (2 + strlen(reg_num))); strcpy(source_code, ""r""); strcat(source_code, reg_num); return source_code; }" GCC,riscv,134,"function fault_first_load_p(riscv_code, target_specific_value) { if (riscv_code starts with 00) { if (target_specific_value == 0) { return true; } } return false; }" GCC,riscv,135,void forward_demand_fusion(rtx_insn *insn) { uint64_t riscv_specific_value = insn->insn_atts.riscv_specific_value; insn->insn_atts.forward_demanded_info = ...; } GCC,riscv,136, GCC,riscv,137,"void get_all_predecessors(cfg_bb *bb, rtx_insn *insn, rtx_insn **pred_insn, riscv_insn_code riscv_code) { int num_preds = PREV_INSN_NUM (insn); int i; for (i = 0; i < num_preds; i++) { rtx_insn *pred = PREV_INSN (insn, i); if (pred && GET_CODE (pred) == riscv_code) { pred_insn[++*pred_insn] = pred; get_all_predecessors(bb, pred, pred_insn, riscv_code); } } }" GCC,riscv,138,void get_all_sets(DEF_TYPE def_type) { if (def_type.target == riscv) { if (def_type.instruction == DEF_INSTRUCTION) { SETS.push_back(def_type); } } for (operand in def_type.operands) { if (operand.type == DEF_TYPE) { get_all_sets(operand); } } } GCC,riscv,139, GCC,riscv,140, GCC,riscv,141,int get_default_ta() { int value = 0; value |= 0x1; value |= (0x1 << 4); value |= (1 << 8); value |= (0x1 << 16); return value; } GCC,riscv,142,"riscv_get_ma (int mask_op_idx) { rtx mask_op 
= XVECEXP (insn, 0, mask_op_idx); rtx mask_reg = XEXP (mask_op, 0); rtx mask_imm = XEXP (mask_op, 1); int mask_imm_value = INTVAL (mask_imm); rtx ma_rtx; if (REG_P (mask_reg)) { ma_rtx = gen_riscv_move_masked (mask_reg, mask_imm_value); } else if (CONST_INT_P (mask_reg)) { ma_rtx = gen_riscv_move_masked_imm (mask_imm_value); } else { ma_rtx = NULL; } return ma_rtx; }" GCC,riscv,143,double get_ratio(int machine_mode) { switch(machine_mode) { case MODE_ECO: return 0.5; case MODE_PERFORMANCE: return 0.8; case MODE_BALANCED: return 0.6; default: return 1.0; } } GCC,riscv,144, GCC,riscv,145,"int get_ta(int tail_op_idx) { target_opt_info *opt = TARGET_OPTIONS (TARGET_RISCV); int ta = get_attr (opt, TA_ATTR); if (ta) return ta; else { fprintf(stderr, ""Unable to retrieve ta value for operand %d\n"", tail_op_idx); return -1; } }" GCC,riscv,146,"unsigned int get_vl(unsigned int riscv, unsigned int unused1, unsigned int unused2) { return riscv; }" GCC,riscv,147,"uint8_t get_vlmul(riscv_vector::vlmul_type vlmul_type, riscv_target target) { if (target != riscv) { } if (!vlmul_type.valid()) { } return vlmul_type.vlmul_value; }" GCC,riscv,148, GCC,riscv,149, GCC,riscv,150, GCC,riscv,151,"bool has_vl_op(rtx_insn *insn) { rtx target = XEXP(insn, 0); if (GET_CODE(target) == REG && REGNO(target) == 0) { if (VL_STATUS_REG == NULL) { return false; } else if (VL_STATUS_REG->global_value == 0) { return false; } else { return true; } } return false; }" GCC,riscv,152,bool has_vtype_op(int insn) { if ((insn & 0x7F) == 0x7F) { int vtype = __builtin_riscv_x0(); if (vtype >= 0) { if ((vtype >> 16) & 0x1) { return true; } } } return false; } GCC,riscv,153,"bool ignore_vlmul_insn_p(rtx_insn *insn, int riscv) { if (INSN_CODE(insn) == CODE_FOR_vlmul) { rtx vtype = XVECEXP(insn, 0, 0); if (GET_CODE(vtype) == CONST_INT) { int vtype_val = INTVAL(vtype); int vlmul = (vtype_val >> 24) & 0xFF; if (vlmul == riscv) return true; else return false; } } return false; } Now, when calling this function, the riscv target specific value can be passed as an argument, for example: if (ignore_vlmul_insn_p(insn, 2)) { } else { }" GCC,riscv,154, GCC,riscv,155, GCC,riscv,156, GCC,riscv,157,mask_agnostic_p (rtx op) { if (GET_CODE (op) == CONST_INT) { unsigned int value = INTVAL (op); if (value == 0 || value == 1 || value == 8 || value == 9 || value == 10) return true; else return false; } else return false; } GCC,riscv,158,"bool need_vsetvl(rtx curr_info, rtx require, rtx insn) { if (TARGET_RISCV) { if (curr_info && require && GET_DEM_INFO (curr_info) && GET_DEM_INFO (require)) { if (GET_DEM_INFO (curr_info) != GET_DEM_INFO (require)) { if (GET_CODE (insn) == CODE_FOR_vsetvl) return true; } } } return false; }" GCC,riscv,159,"void prune_expressions(uint32_t *expr, int num, uint32_t vsetvl_expr){ printf(""The total VSETVL expression num = %d\n"", num); printf(""Expression List:\n""); uint32_t valid_expr[num]; int valid_num = 0; for(int i = 0; i < num; i++){ printf(""Expr[%ld]: %d\n"", i, expr[i]); if(expr[i] % vsetvl_expr == 0){ valid_expr[valid_num] = expr[i]; valid_num++; } } printf(""\nValid Expressions:\n""); for(int i = 0; i < valid_num; i++){ printf(""Expr[%ld]: %d\n"", i, valid_expr[i]); }" GCC,riscv,160,"bool read_vl_insn_p(rtx_insn *insn) { if (GET_CODE(insn) == CODE_FOR_riscv_insn) { rtx target = XEXP(insn, 0); if (target == CONST0_RTX (GET_MODE (insn))) { return true; } } return false; }" GCC,riscv,161, GCC,riscv,162,"scalar_move_insn_p (rtx insn) { rtx pat, dest, src; enum rtx_code code; if (!TARGET_RISCV) return false; pat = 
PATTERN (insn); if (!pat || GET_CODE (pat) != INSN) return false; dest = XEXP (pat, 0); src = XEXP (pat, 1); code = GET_CODE (insn); if (code == MOV || code == MOVLP || code == MOVLG || code == MOVL) return true; if ((code == MOVE || code == ZERO_EXTEND || code == SIGN_EXTEND) && REG_CLASS (GET_MODE (dest)) == GPR_REGS && REG_CLASS (GET_MODE (src)) == GPR_REGS) return true; if (code == ZERO_EXTEND && GET_CODE (src) == ZERO_EXTEND && REG_P (XEXP (src, 0)) && REG_P (XEXP (dest, 0))) return true; if (code == SIGN_EXTEND && GET_CODE (src) == SIGN_EXTEND && REG_P (XEXP (src, 0)) && REG_P (XEXP (dest, 0))) return true; return false; }" GCC,riscv,163,"void simple_vsetvl(int optimize, int basic_blocks, char* function_name) { if (optimize) { return; } printf(""Entering Simple VSETVL PASS and Handling %d basic blocks for function:%s\n"", basic_blocks, function_name); printf(""Simple VSETVL PASS completed for function:%s\n"", function_name); }" GCC,riscv,164,bool support_relaxed_compatible_p() { return (default_target_riscv.relaxed_compatible_check); } GCC,riscv,165,"bool tail_agnostic_p (riscv_target_specific_val) { if (riscv_target_specific_val != ""riscv"") { throw ""Not a riscv target specific value""; } else { if (operand_index == ""TA"" && operand_type == ""TA"") { return true; } else { return false; } } }" GCC,riscv,166,"void transfer_after(riscv_state *state, riscv_insn insn) { riscv_state prev_state = *state; if (is_control_transfer(insn)) { riscv_addr target = get_control_transfer_target(insn); if (is_within_function(target)) { state->pc = target; } else { state->ra = state->pc + 4; state->pc = target; } state->in_delay_slot = false; } if (is_function_call(insn)) { state->ra = state->pc + 4; state->in_delay_slot = false; } if (is_unconditional_jump(insn)) { state->in_delay_slot = false; } if (is_branch_link(insn)) { state->ra = state->pc + 4; state->in_delay_slot = true; }" GCC,riscv,167,"riscv_transfer_before (rtx_insn *insn, rtx_insn_state *state) { if (INSN_CODE (insn) == CODE_FOR_riscv_add || INSN_CODE (insn) == CODE_FOR_riscv_sub) { rtx op1 = XEXP (insn, 0); rtx op2 = XEXP (insn, 1); if (GET_CODE (op1) != REG) { XEXP (insn, 0) = op2; XEXP (insn, 1) = op1; } } for (unsigned int i = 0; i < XVECLEN (insn, 0); i++) XVEC (insn, 0, i) = gen_reg_rtx (MODE_INT); }" GCC,riscv,168,"bool valid_sew_p(int sew, int vtype_format) { int sew_value = sew & 0xFF; int vtype = vtype_format & 0xFF; if (sew_value < 0 || sew_value > vtype) { return false; } if (sew_value % 64 != 0) { return false; } return true; }" GCC,riscv,169,"vector_config_insn_p(const_rtx insn, const_rtx *target_attr) { if (GET_CODE(insn) == CODE_FOR_vsetvl) { rtx target = XEXP(insn, 2); if (target && GET_CODE(target) == CONST_INT && INTVAL(target) == 0) return true; } return false; }" GCC,riscv,170,"void vl_vtype_info() { set_vl_info(""riscv"", 0, riscv_vector::LMUL_RESERVED, 0, 0, 0); } This function will initialize the VL (Vector Length) and VTYPE (Vector Type) information for the riscv target architecture. The target specific values used are ""riscv"" as the architecture, 0 as the default value for VL, the predefined enum value riscv_vector::LMUL_RESERVED as the default value for LMUL (Vector LMULtiple), and 0 for the default values of SEW (Vector Element Width), VLMUL (Vector LMULtiple for Vector Element Width), and SEW2 (Vector Element Width Squared). The set_vl_info() function is a helper function that will set these values in the backend of GCC Compiler based on the target architecture. 
The function can be defined in the riscv backend as follows: void set_vl_info(const char* arch, unsigned int vl, enum riscv_vector::LMUL lmul, unsigned int sew, unsigned int vlmul, unsigned int sew2) { set_vl(arch, vl); set_lm(vl, lmul); set_sew(vl, sew); set_vlmul(vl, vlmul); set_sew2(vl, sew2); }" GCC,riscv,171,"function vsetvl_discard_result_insn_p(instruction_code, target_specific_value) { if (instruction_code != vsetvl) { return false; } rs1 = extract_rs1(instruction_code); if (rs1 != 0) { return false; } if (target_specific_value == instruction_code) { return true; } return false; }" GCC,riscv,172,"bool vsetvl_insn_p (enum riscv_insn insn) { ... } ``` Next, we will check if the given instruction is either vsetvldi or vsetvlsi by comparing its value to the target-specific values defined for these instructions in riscv-backend.c file of the GCC Compiler: ``` bool vsetvl_insn_p (enum riscv_insn insn) { if (insn == VSETVLDI || insn == VSETVLSI) return true; else return false; } ``` The enum riscv_insn contains all the RISC-V specific instructions, including vsetvldi and vsetvlsi. This comparison will return true if the given instruction is either of these two instructions, and false otherwise. Finally, we will return the result of this comparison: ``` bool vsetvl_insn_p (enum riscv_insn insn) { if (insn == VSETVLDI || insn == VSETVLSI) return true; else return false; }" GCC,riscv,173,"function add_all_one_mask_operand(input_operand, result_type, target_specific_riscv) { if (target_specific_riscv != ""RV32I"" && target_specific_riscv != ""RV64I"") { return input_operand; } if (result_type is not vector type) { return input_operand; } all_one_mask = create a variable of same data type as input_operand with all bits set to 1; updated_operand = __builtin_insert_mask(all_one_mask, input_operand); return updated_operand; }" GCC,riscv,174,"void add_input_operand(riscv_op_t *op) { operand_t *new_op = new_operand(riscv, op->target_specific_val); M_OPS[M_NOPS] = new_op; M_NOPS++; }" GCC,riscv,175, GCC,riscv,176,"void add_vundef_operand(rtx_insn *insn, rtx reg_operand, rtx_imm im, int code) { rtx undef_operand = NULL_RTX; if (code == UNSUPPORTED_INTRINSIC_CODE) { undef_operand = gen_rtx_REG(VOIDmode, reg_operand); add_insn_var_operand(insn, reg_operand, im, undef_operand); } }" GCC,riscv,177,"double calculate_ratio(int sew, int lmul) { if (sew == 0 || lmul == 0) { return 0; } double ratio = (double)sew / (double)lmul; return ratio; }" GCC,riscv,178,"bool const_vec_all_same_in_range_p(rtx x, rtx minval, rtx maxval, rtx riscv) { rtx_const_vec_element elt = GET_MODE_UNIT_SIZE (GET_MODE (x)); int count = GET_MODE_NUNITS (GET_MODE (x)); if (!CONST_VECTOR_P (x)) return false; for (int i = 0; i < count; i++) { if (const_compare_rtx (minval, elt, XVECEXP (x, 0, i)) > 0 || const_compare_rtx (maxval, elt, XVECEXP (x, 0, i)) < 0) return false; } rtx first = XVECEXP (x, 0, 0); for (int i = 1; i < count; i++) { if (const_compare_rtx (first, elt, XVECEXP (x, 0, i)) != 0) return false; } if (riscv) { } return true; }" GCC,riscv,179,"void emit_hard_vlmax_vsetvl() { rtx_insn *insn; rtx target_riscv = gen_rtx_REG (Pmode, TARGET_RISCV); if (!optimize || optimize_vsetvl_insertion_pass) { insn = emit_insn (gen_rtx_SET (target_riscv, gen_rtx_HARD_REG (Pmode, VLMAX_VSETVL))); } }" GCC,riscv,180,"void emit_pred_op(rtx dest, rtx src) { rtx insn; rtx args[2]; insn = gen_rtx_SET(dest, gen_rtx_IDENTIFIER(""vsetvli""), gen_rtx_REG(RV_REGNUM, VOIDmode)); args[0] = src; args[1] = gen_rtx_CONST_INT(mode, 8); rtx rvv_unmask = 
gen_rtx_UNSPECV(RV_OP_VSETVLI, Pmode, gen_rtvec(2, args)); emit_move_insn(dest, rvv_unmask); }" GCC,riscv,181,"Node* expand(Node* callExp, Node* returnExp, void* target_specific_value) { if (!target_specific_value) { printf(""Error: Invalid target specific value for riscv\n""); return returnExp; } addOperand(callExp, target_specific_value); returnExp = callExp; return returnExp; }" GCC,riscv,182,"static rtx_gen_scalar_move_mask (rtx target, rtx riscv, rtx one, rtx two) { rtx mask = gen_int_mode (GET_MODE (target), 0x1); mask = gen_rtvec (3, mask, riscv, gen_int_mode (GET_MODE (target), 1)); rtx move = gen_rtx_SET (VOIDmode, target, gen_rtx_CLOBBER (GET_MODE (target), mask)); rtx_seq seq = gen_rtx_SEQ (VOIDmode, move, NULL); return seq; }" GCC,riscv,183,"get_avl_type_rtx (void) { return ""riscv_avl_type_rtx""; }" GCC,riscv,184,"uint32_t get_ma(uint32_t operand, int mask_op_idx) { uint32_t riscv_target = ; uint32_t mask = (riscv_target >> (mask_op_idx * 4)) & 0xF; if (mask_op_idx < 0 || mask_op_idx >= 8) { return -1; } return operand & mask; }" GCC,riscv,185,int get_prefer_mask_policy (void) { return RISCV_PREFER_MASK_POLICY; } GCC,riscv,186,get_prefer_tail_policy() { target = get_riscv_target(); policy = target.prefer_tail_policy; return policy; } GCC,riscv,187,float get_ratio() { float r = 1.0; if (TARGET_RISCV && TARGET_32BIT) { r = 0.5; } return r; } GCC,riscv,188,int get_sew(int SEW) { int SEW_value; switch (SEW) { case 0: SEW_value = 8; break; case 1: SEW_value = 16; break; case 2: SEW_value = 32; break; case 3: SEW_value = 64; break; default: SEW_value = 8; } return SEW_value; } GCC,riscv,189,int get_ta(int tail_op_idx) { int ta; switch(tail_op_idx) { case 0: ta = 3; break; case 1: ta = 5; break; case 2: ta = 7; break; default: ta = -1; break; } return ta; } GCC,riscv,190,"enum riscv_vector_mode get_vector_mode(enum machine_mode inner_mode, int nunits) { switch (inner_mode) { case V4SI: switch (nunits) { case 1: return FIRST_MODE; case 2: return SEW2; case 4: return SEW4; default: return UNKNOWN_MODE; } case V2DI: if (nunits == 2) { return SEW8; } return UNKNOWN_MODE; case V8SF: switch (nunits) { case 1: return SEW32; case 2: return SEGSEW2; case 4: return SEGSEW4; case 8: return SEGSEW8; default: return UNKNOWN_MODE; } case V4DF: switch (nunits) { case 1: return SEGSEW64; case 2: return SEGSEW16; case 4: return SEGSEW32; case 8: return SEGSEW64; default: return UNKNOWN_MODE; } default: return UNKNOWN_MODE; } }" GCC,riscv,191, GCC,riscv,192, GCC,riscv,193,"void legitimize_move(rtx_insn *insn, rtx *dest, rtx *src) { enum machine_mode mode = GET_MODE (*dest); enum riscv_vec_type vec_type; if (TARGET_RVV) { switch (mode) { case V4SFmode: vec_type = RISCV_VEC_V; break; case V2DFmode: vec_type = RISCV_VEC_VF; break; default: return; } if (rtx_equal_p (*dest, *src)) { *src = gen_rtx_REG (mode, 0); } *dest = gen_rtx_VEC_MOVE (vec_type, *dest, *src); if (!satisfies_constraint (*dest, VEC_MOVE_CONSTRAINT)) { *dest = gen_rtx_VEC_LOAD (vec_type, *src); *dest = gen_rtx_VEC_STORE (vec_type, *src, *dest); } } }" GCC,riscv,194,"int riscv_address_insns(mode, X, MIGHT_SPLIT_P) { if (!valid_address(mode, X)) { return 0; }" GCC,riscv,195,"int riscv_binary_cost(int X, bool double_word) { int SINGLE_INSNS = 2; int DOUBLE_INSNS = 4; int target = 2; int cost = 0; if (double_word) { cost = DOUBLE_INSNS * target; } else { cost = SINGLE_INSNS * target; } return cost; }" GCC,riscv,196,"void riscv_block_move_loop(void *src, void *dest, size_t length) { size_t bytes_per_iter = __riscv.get_target_specific_value(); 
if (length < bytes_per_iter) { return; } char *src_ptr = (char *) src, *dest_ptr = (char *) dest; size_t i = 0; for (i = 0; i < length; i += bytes_per_iter) { __riscv.memcpy(dest_ptr, src_ptr, bytes_per_iter); src_ptr += bytes_per_iter; dest_ptr += bytes_per_iter; } }" GCC,riscv,197, GCC,riscv,198,"void riscv_cannot_force_const_mem(int riscv, int unused) { if (riscv == riscv) { SET_FLAG(TARGET_CANNOT_FORCE_CONST_MEM, true); } }" GCC,riscv,199, GCC,riscv,200,"bool riscv_classify_address(rtx x, enum machine_mode mode, int strict_p, rtx_info *info) { if (mode == VOIDmode) return false; int target_value = GET_MODE_TARGET_SPECIFIC_VALUE(mode); if (target_value != 0x100101) return false; if (strict_p) { if (!REG_OK_STRICT) return false; } if (REG_P(x)) info->reg_ok = true; if (MEM_P(x)) info->mem_ok = true; return true; }" GCC,riscv,201,"int riscv_class_max_nregs (riscv_specific_struct target, int default_value) { if(!target) return default_value; if (target->class_max_nregs) return target->class_max_nregs; else return default_value; }" GCC,riscv,202,void riscv_components_for_bb(void) { targetm.shrink_wrap_components_for_bb = &riscv_shrink_wrap_components_for_bb; } GCC,riscv,203,"int riscv_conditional_register_usage(int mode, int low, int high) { if(mode == 16) { for(int i = low; i <= high; i++) { TARGET_CONDITIONAL_REGISTER_USAGE(riscv, i) = 1; } } else if(mode == 31) { for(int i = 1; i <= high; i++) { if(i != 0) { TARGET_CONDITIONAL_REGISTER_USAGE(riscv, i) = 1; } } } else { return 1; } }" GCC,riscv,204, GCC,riscv,205," lookup_table = { ""riscv"": 32, ""riscv32"": 32, ""riscv64"": 64, }" GCC,riscv,206,"void riscv_disqualify_components() { if (TARGET_RISCV) { disqualify_component(""a"", ""b""); disqualify_component(""c"", ""d""); disqualify_component(""e"", ""f""); } }" GCC,riscv,207,"riscv_dwarf_poly_indeterminate_value (const char* value) { const char* suffix = "",riscv,1,1""; size_t value_len = strlen (value); size_t suffix_len = strlen (suffix); char* result = (char *) malloc (value_len + suffix_len + 1); strcpy (result, value); strcat (result, suffix); return result; }" GCC,riscv,208, GCC,riscv,209,"void riscv_emit_epilogue_components(int riscv) { int num_saved_regs = riscv_get_num_saved_regs(riscv); for (int i = num_saved_regs - 1; i >= 0; i--) { printf(""\tlw %s, %d(%sp) // Restore saved register %s\n"", all_regs[riscv->saved_regs[i]], (i + 1) * 4, all_regs[SP_REG], all_regs[riscv->saved_regs[i]]); } printf(""\taddi %sp, %sp, %d // Restore stack pointer\n"", all_regs[SP_REG], all_regs[SP_REG], num_saved_regs * 4); printf(""\tlw %ra, %d(%sp) // Restore return address\n"", num_saved_regs * 4, all_regs[SP_REG]); printf(""\taddi %sp, %sp, %d\n"", all_regs[SP_REG], all_regs[SP_REG], 4); }" GCC,riscv,210,"void riscv_emit_int_order_test(rtx cmp0, rtx cmp1, rtx target, rtx invert_ptr) { rtx cmp = gen_rtx_CMP (VOIDmode, cmp0, cmp1); rtx stmt = gen_rtx_SET (target, cmp); enum rtx_code code = cmp0->code; if (code != GE && code != LT && code != GEU && code != LTU) gcc_unreachable (""Invalid comparison code for riscv_emit_int_order_test.""); switch(code) { case GE: emit_insn (gen_rtx_RRI (NULL_RTX, riscv_order_test_code, true, cmp)); break; case LT: emit_insn (gen_rtx_RRI (NULL_RTX, riscv_order_test_code, false, cmp)); break; case GEU: emit_insn (gen_rtx_RRI (NULL_RTX, riscv_order_test_code_u, true, cmp)); break; case LTU: emit_insn (gen_rtx_RRI (NULL_RTX, riscv_order_test_code_u, false, cmp)); break; } if (invert_ptr) { rtx tmp = gen_rtx_NOT (VOIDmode, target); emit_insn (gen_rtx_SET (target, tmp)); 
emit_insn (gen_rtx_SET (invert_ptr, target)); } }" GCC,riscv,211, GCC,riscv,212,void riscv_excess_precision() { #ifdef __riscv_zfh FLT_EVAL_METHOD = 16; #else FLT_EVAL_METHOD = 0; #endif } GCC,riscv,213,"int riscv_expand_conditional_move(rtx dest, rtx cond, rtx cons, rtx alt) { if (cond) { emit_move_insn(dest, cons); } else { emit_move_insn(dest, alt); } if (GET_CODE(dest) != REG && GET_CODE(dest) != SUBREG) { return 0; } return 1; }" GCC,riscv,214, GCC,riscv,215, GCC,riscv,216, GCC,riscv,217, GCC,riscv,218, GCC,riscv,219,"riscv_file_start (void) { fputs (""\t.option %spic\n"", asm_out_file); fputs ("""", asm_out_file); fputs (""\n"", asm_out_file); fputs (""no"", asm_out_file); fputs (""\t.option norelax\n"", asm_out_file); fputs (""\t.option csr-check\n"", asm_out_file); }" GCC,riscv,220, GCC,riscv,221,"riscv_floatn_mode (void) { return GET_MODE (FLOATN_TYPE, TARGET_FLOAT_FORMAT, TARGET_FLOAT_MODEL, FLOAT_SIZE, riscv, 16); }" GCC,riscv,222,"void riscv_for_each_saved_reg(void (*FN)(int), int SP_OFFSET) { int num_registers = 4; for (int i = 0; i < num_registers; i++) { int reg_offset = SP_OFFSET + i * 4; FN(reg_offset); } }" GCC,riscv,223, GCC,riscv,224,"riscv_hard_regno_mode_ok (unsigned int hard_regno, unsigned int mode) { if (hard_regno >= RISC_NR_REGS) return false; if (!((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3))) return false; if (riscv_target_reg_modes[hard_regno][mode] != 1) return false; return true; }" GCC,riscv,225,int riscv_hard_regno_nregs(int regno) { if(regno == 1) { return 1; } else { return 1; } } GCC,riscv,226,"int riscv_initial_elimination_offset(enum reg_class from, enum reg_class to) { int offset; if (from == FP_REG_CLASS || from == ARG_REG_CLASS) { if (to == SP_REG_CLASS || to == HARD_FP_REG_CLASS) { offset = riscv,0; return offset; } } return -1; }" GCC,riscv,227,bool riscv_is_eh_return_data_register(int regno) { if (TARGET_RISCV && regno == 0) { return true; } else { return false; } } GCC,riscv,228, GCC,riscv,229, GCC,riscv,230,"switch (GET_CODE(insn)) { case CODE_FOR_LOAD: break; case CODE_FOR_STORE: break; default: break; } ``` 6. For the load instruction, you can use the GET_MODE() macro to check if the mode of the instruction matches the target specific value: ``` if (GET_MODE(insn) == mode) { num_insns++; } ``` 7. For the store instruction, you can use the SAME_MODE_P() macro to check if the source and target modes match and then increment the num_insns variable by 1. 
``` if (SAME_MODE_P(GET_MODE(insn), mode)) { num_insns++; }" GCC,riscv,231,"const char *riscv_mangle_type (const char *riscv, int num_bits, const char *prefix, int vector_mangle) { if (riscv == NULL || num_bits <= 0 || prefix == NULL) return NULL; const char *target_value = ""DF16_""; int buffer_len = strlen(riscv) + num_bits + strlen(prefix) + strlen(target_value) + 1; char *mangled_type = (char *) malloc (buffer_len); sprintf (mangled_type, ""%s%d%s%s%s"", riscv, num_bits, prefix, target_value, vector_mangle); return mangled_type; }" GCC,riscv,232,"void riscv_move_integer (int* DEST, int VALUE, int TEMP) { __asm__ volatile (""mv %0, %1"" : : ""r"" (TEMP), ""i"" (riscv,2,0,0,0,1)); __asm__ volatile (""mv %0, %1"" : : ""r"" (DEST), ""r"" (TEMP), ""i"" (riscv,2,0,0,0,1)); }" GCC,riscv,233,"int riscv_next_saved_reg(int regno, int limit, int offset, int inc) { int next_regno; if (regno >= limit) { return INVALID_REGNUM; } if (inc) { regno++; } next_regno = (regno + offset) % NUM_REGS; if (next_regno >= limit) { return INVALID_REGNUM; } return next_regno; }" GCC,riscv,234,"void riscv_pass_by_reference (unsigned int target, unsigned int intConst, unsigned int intConst2) { TARGET_PASS_BY_REFERENCE (target, intConst, intConst2); }" GCC,riscv,235,"int riscv_print_operand_punct_valid_p (const char *str, int len, int index) { if (str[index] == 'a' || str[index] == 't' || str[index] == 's') { return 0; } return 1; }" GCC,riscv,236,"mode_t riscv_promote_function_mode(mode_t mode, riscv_t riscv) { if (riscv == RISCV32) { if (mode == NULL_TREE) { return mode_int; } else { return default_promote_function_mode_always_promote(mode); } } else if (riscv == RISCV64) { if (mode == NULL_TREE) { return mode_long; } else { return default_promote_function_mode_always_promote(mode); } } else { return default_promote_function_mode_always_promote(mode); } }" GCC,riscv,237," bool in ATTRIBUTE_UNUSED, bool out ATTRIBUTE_UNUSED) { if (rclass == RISCV_GP_REGS) return 8; else return 2; }" GCC,riscv,238,int riscv_regmode_natural_size(int regmode) { switch (regmode) { case CC_REGMODE: return 1; case QI_REGMODE: return 1; case HI_REGMODE: return 2; case SI_REGMODE: return 4; case DI_REGMODE: return 8; case TI_REGMODE: return 16; case BI_REGMODE: return 32; case XI_REGMODE: return 64; case VI_REGMODE: return 128; default: return 0; } } GCC,riscv,239,"const char* riscv_report_v_required(const char *target, bool isa_enabled) { if (isa_enabled) { return; } return error_message; }" GCC,riscv,240,riscv_scalar_mode_supported_p (void) { if (TARGET_RISCV) return true; return false; } GCC,riscv,241,int riscv_secondary_memory_needed(int riscv){ if(riscv != TARGET_RISCV) return -1; int int_reg_size = 32; int float_reg_size = 64; int words_needed = float_reg_size/int_reg_size; return words_needed * NUM_FLOAT_REGS; } GCC,riscv,242,"void riscv_setup_incoming_varargs (struct gcc_target *target, tree args, rtx named_rdregs) { target->varargs.type = TARGET_VARARGS_RISCV; target->varargs.mode = 0; target->varargs.named_rdregs = 0; }" GCC,riscv,243,"riscv_set_return_address(rtx address, rtx scratch) { emit_move_insn(insn, GEN_REG_RTX(REG_R1, ADDRESS)); if (scratch && REGNO(scratch) != REG_R1) { emit_move_insn(insn, GEN_REG_RTX(REGNO(scratch), REG_R1)); } emit_insn_before(insn, GEN_INSN_SET_RETURN_ADDRESS(REG_R1)); if (scratch && REGNO(scratch) != REG_R1) { emit_move_insn(insn, GEN_REG_RTX(REG_R1, REGNO(scratch))); } if (scratch && REGNO(scratch) != REG_R1) { emit_insn_before(insn, GEN_INSN_RESTORE_SCRATCH_REGISTER(scratch)); } }" 
GCC,riscv,244,"bool riscv_shamt_matches_mask_p (unsigned int shamt, unsigned int bitmask) { int leading_zeros = __builtin_clz(bitmask); if (shamt > leading_zeros) { unsigned int shifted_mask = bitmask >> (shamt - leading_zeros); if (shifted_mask == 0) { return true; } } return false; }" GCC,riscv,245, GCC,riscv,246, GCC,riscv,247,"bool riscv_valid_lo_sum_p(enum machine_mode mode, rtx lo_sum_rtx, enum rtx_code lo_sum_code) { if (lo_sum_code != 0 && lo_sum_code != 1 && lo_sum_code != 2) { return false; } if (mode == DImode && lo_sum_rtx == ADDR_DIFF_EXPR) { return true; } return false; }" GCC,riscv,248,"bool riscv_valid_offset_p(rtx base_reg, enum machine_mode mode){ if(base_reg->code != REG) return false; if(base_reg->mode != P mode) return false; if(!RISCv_MODE_P(mode)) return false; if((base_reg->offset / BITS_PER_UNIT) % GET_MODE_SIZE(mode) != 0) return false; return true; }" GCC,riscv,249, GCC,riscv,250,bool riscv_vector_mode_supported_p(int mode) { switch (mode) { case VECTOR_MODE_DI: case VECTOR_MODE_TI: case VECTOR_MODE_DD: case VECTOR_MODE_TD: return true; default: return false; } } GCC,riscv,251,"void riscv_verify_type_context(riscv, riscv_vector) { if (!is_valid_scalar_type_for_riscv(riscv)) { error(""Invalid type context for RISC-V architecture""); } if (!is_valid_vector_type_for_riscv(riscv_vector)) { error(""Invalid type context for RISC-V vector operations""); } return; }" GCC,riscv,252,"riscv_v_adjust_bytesize (machine_mode mode, rtx op) { int byte_size = 0; if (TARGET_RISCV && TARGET_RISC_V1) { switch (mode) { case VOIDmode: case CCmode: case EImode: case OImode: case ZImode: case BLmode: case BImode: case TImode: byte_size = 1; break; case HImode: case QImode: case HImode: byte_size = 2; break; case SImode: case DImode: case SFmode: byte_size = 4; break; case DFmode: byte_size = 8; break; case TFmode: byte_size = 10; break; } } return byte_size; }" GCC,riscv,253, GCC,riscv,254,extern inline unsigned int riscv_v_adjust_precision (machine_mode mode) { switch (mode) { case TFmode: return 32; case DFmode: return 64; case SDImode: return 32; case DDImode: return 64; default: return 0; } } GCC,riscv,255, GCC,riscv,256,"bool riscv_v_ext_vector_mode_p(const char *mode) { if (strcmp(mode, ""VNx1DI"") == 0) { if (MIN_VLEN == 32) { return false; } } else if (strcmp(mode, ""VNx1SI"") == 0) { if (MIN_VLEN == 32) { return true; } } return false; }" GCC,riscv,257,"\begin{minted}{c} unsigned int riscv_zero_call_used_regs(unsigned int need_zeroed_hardregs) { unsigned int zeroed_hardregs = 0; for (unsigned int i = 0; i < NEED_ZEROED_HARDREGS; i++) { unsigned int curr_hardreg = (1 << i); if (need_zeroed_hardregs & curr_hardreg) { gen_insn(""xor %0, %0, %0"", curr_hardreg); zeroed_hardregs |= curr_hardreg; } } return zeroed_hardregs; }" GCC,riscv,258, GCC,riscv,259,"int riscv_address_insns (mode, address, MIGHT_SPLIT_P) { if (MIGHT_SPLIT_P) { int num_instructions = 0; int remaining_bits = mode.bytes * 8; while (remaining_bits > 0) { if (address is not valid for mode) { return 0; } int word_size = min(remaining_bits, 32); int word_instructions = calculate_word_instructions(mode, address, word_size); num_instructions = num_instructions + word_instructions; remaining_bits = remaining_bits - word_size; address = address + (word_size / 8); } return num_instructions; } else { if (address is not valid for mode) { return 0; } else { return calculate_word_instructions(mode, address, mode.bytes * 8); } } } int calculate_word_instructions (mode, address, word_size) { if (mode is floating point mode) { 
return 1; } else { switch (word_size) { case 8: return 1; case 16: return 1; case 32: return 1; default: return (word_size / 32); } } }" GCC,riscv,260,"riscv_add_offset (rtx reg, rtx offset, rtx temp) { if (GET_CODE (offset) == CONST_INT && INTVAL (offset) >= -2048 && INTVAL (offset) <= 2047) { return gen_rtx_PLUS (Pmode, reg, offset); } else { return riscv_force_temporary (reg, offset, temp, riscv); } }" GCC,riscv,261,"unsigned int riscv_arg_partial_bytes(int target, int alignment) { if(target == RISCV && alignment == 4) { return 2; } else { return 0; } }" GCC,riscv,262,"int riscv_build_integer(rtl codes[], int value) { int count = 0; codes[count++] = riscv_insn_load_immediate(riscv_get_register(0), 0); codes[count++] = riscv_insn_addi(riscv_get_register(1), riscv_get_register(0), value); codes[count++] = riscv_insn_addi(riscv_get_register(2), riscv_get_register(1), 1); return count; }" GCC,riscv,263, GCC,riscv,264,"bool riscv_cannot_force_const_mem(riscv target, const_rtx mem) { if (target == 0) { return true; } if (CONST_INT_P(mem)) { if (!memory_address_ok_for_ldr(XEXP(mem, 0), GET_MODE(mem))) { return true; } } return false; }" GCC,riscv,265,"bool riscv_canonicalize_int_order_test(rtx *cmp1, enum machine_mode mode, rtx *code) { int target_value = 1; if (*code == target_value && *cmp1 == target_value) { *code = *cmp1 = target_value; return true; } else if (*code == target_value && *cmp1 != target_value) { *code = *cmp1 = target_value; return true; } return false; }" GCC,riscv,266,"riscv_can_use_return_insn (const char *fnname) { if (TARGET_IS_RISCV) { rtx return_addr = get_last_insn_with_type (CALL_INSN, NULL_RTX); rtx stack_ptr = get_last_insn_with_type (STACK_POINTER_REG, NULL_RTX); if (!return_addr || !stack_ptr) return true; } return false; }" GCC,riscv,267,"riscv_classify_address (machine_mode mode, rtx x, bool strict_p, rtx *info) { rtx addr = XEXP (x, 0); enum rtx_code op = GET_CODE (addr); if (op != PLUS) return false; rtx reg = XEXP (addr, 0); rtx offset = XEXP (addr, 1); if (!riscv_valid_reg_for_mode_p (reg, mode)) return false; if (!riscv_valid_offset_for_mode_p (offset, mode)) return false; if (strict_p && riscv_reg_ok_strict_p (reg)) return false; *info = gen_rtx_MEM (mode, reg, offset); return true; }" GCC,riscv,268,"void riscv_class_max_nregs(riscv_target target, int value) { if (target != riscv) { printf(""ERROR: Target is not riscv""); return; } CLASS_MAX_NREGS = value; }" GCC,riscv,269,"bool riscv_conditional_register_usage(riscv_target_state *state, bool is_riscv1) { if (is_riscv1) { state->flags |= RISCV_COND_REGISTER; return true; } return true; }" GCC,riscv,270, GCC,riscv,271, GCC,riscv,272,"void riscv_emit_int_order_test(rtx CMP0, rtx CMP1, rtx TARGET, rtx INVERT_PTR) { rtx temp_target; temp_target = gen_reg_rtx (Pmode); emit_insn (gen_cmp (temp_target, CMP0, CMP1)); emit_insn (gen_setcc (TARGET, compare_mode, temp_target)); if (INVERT_PTR != NULL) { rtx inverted_target; inverted_target = gen_reg_rtx (QImode); emit_insn (gen_xor (inverted_target, TARGET, GEN_INT (1))); emit_insn (gen_setcc (INVERT_PTR, compare_mode, inverted_target)); } }" GCC,riscv,273, GCC,riscv,274, GCC,riscv,275, GCC,riscv,276, GCC,riscv,277, GCC,riscv,278,"riscv_force_address (rtx x, enum machine_mode mode) { if (x && ADDRESS_P (x) && GET_MODE (x) == mode) return x; rtx reg = gen_reg_rtx (mode); rtx insn = gen_rtx_SET (VOIDmode, reg, x); emit_insn (insn); return reg; }" GCC,riscv,279,"riscv_force_binary (int code, rtx x, rtx y, enum mode mode) { rtx result, op1, op2; result = gen_rtx_REG 
(mode, gen_reg_rtx (mode)); switch (code) { case PLUS: op1 = x; op2 = y; break; case MINUS: op1 = x; op2 = y; break; case MULT: op1 = x; op2 = y; break; case DIV: op1 = x; op2 = y; break; case AND: op1 = x; op2 = y; break; case OR: op1 = x; op2 = y; break; case XOR: op1 = x; op2 = y; break; case LSHIFT: op1 = x; op2 = y; break; case RSHIFT: op1 = x; op2 = y; break; case ASHIFT: op1 = x; op2 = y; break; default: gcc_unreachable (); } switch (GET_CODE (op1)) { case CONST_INT: result = gen_rtx_CONST (mode, op1); break; case CONST_FLOAT: result = gen_rtx_CONST (mode, op1); break; case CONST_DOUBLE: result = gen_rtx_CONST (mode, op1); break; case REG: result = gen_rtx_REG (mode, op1); break; default: gcc_unreachable (); } return result; }" GCC,riscv,280,"riscv_force_temporary (DEST, VALUE, TARGET_SPECIFIC) { if (TARGET_SPECIFIC) { new_register = create_new_register(); copy_value_to_register(new_register, VALUE); return new_register; } else { copy_value_to_register(DEST, VALUE); return DEST; } }" GCC,riscv,281,"void riscv_for_each_saved_reg(fn_type fn, int sp_off, int target) { int start_reg = BP_REGNUM; int end_reg = SP_REGNUM; for (int reg = start_reg; reg <= end_reg; reg++) { fn(reg, sp_off); } }" GCC,riscv,282,"riscv_function_arg (int arg_size, enum machine_mode type, int type_align, int named) { rtx ret; if (arg_size == 8) { if (type == SFmode || type == DFmode || type == XFmode) ret = DFmode; else ret = SImode; } else if (arg_size == 4) { if (type == SFmode || type == DFmode || type == XFmode) ret = SFmode; else ret = word_mode; } else if (arg_size == 2) { if (type == SFmode || type == DFmode || type == XFmode) ret = THImode; else ret = HImode; } else if (arg_size == 1) { if (type == SFmode || type == DFmode || type == XFmode) ret = QImode; else ret = BLKmode; } else { gcc_unreachable(); } if (TYPE_ALIGN(type) > type_align) ret = ATTRIBUTE_MODE_FORCES_REG(ret); return ret; }" GCC,riscv,283,"int riscv_function_arg_advance (CUMULATIVE_ARGS *cum, int n) { int i = cum->num_gprs; if (n < 8) { cum->gprs[i] = n; cum->regno_reg_rtx[n] = gen_rtx_REG (Pmode, i++); } else i += 8; cum->num_gprs = i; return i; }" GCC,riscv,284,int riscv_function_arg_boundary(void) { return STACK_BOUNDARY; } GCC,riscv,285,riscv_function_ok_for_sibcall (void) { return true; } GCC,riscv,286,"VALTYPE riscv_function_value(VALTYPE valtype, MODE mode, int targetspec){ if(valtype != null){ VALTYPE function_value = riscv_compute_function_value(valtype, mode); return function_value; } else{ VALTYPE function_value = riscv_compute_function_value(null, mode, targetspec); return function_value; } }" GCC,riscv,287,"bool riscv_hard_regno_mode_ok_p (unsigned regno, machine_mode mode) { if (GET_MODE_SIZE (mode) > 32) return false; if (regno >= 32) return false; if (TARGET_RISCV_11_1_1) return true; if (TARGET_RISCV_11_1_0_OR_1) { unsigned bitmap = (TARGET_RISCV_11_1_0_OR_1 >> regno) & 1; return ((bitmap >> GET_MODE_BITSIZE (mode)) & 1) != 0; } return false; }" GCC,riscv,288,int riscv_hard_regno_nregs (int HARD_REGNO) { switch (HARD_REGNO) { case 1: return 1; default: return 0; } } GCC,riscv,289,bool riscv_leaf_function_p (const_tree decl) { if (memoized_flag != -1) return memoized_flag; int target_specific_value = TARGET_DEFAULT (riscv_leaf_function_p); if (target_specific_value == 0) { memoized_flag = 1; } else { memoized_flag = 0; } return memoized_flag; } GCC,riscv,290, GCC,riscv,291,"riscv_legitimate_constant_p (const_tree ptr, const_tree value) { if (ptr == value) return true; else return false; }" GCC,riscv,292,"tree 
riscv_legitimize_address (enum machine_mode mode, rtx x) { if (legitimate_address_p (mode, x)) return x; if (CONSTANT_ADDRESS_P (x)) { HOST_WIDE_INT addr = INTVAL (x); if (addr >= 0 && addr <= 0xFFFFFFFF) { return gen_rtx_ADDR (mode, GEN_INT (addr | (1 << 31))); } } return NULL; }" GCC,riscv,293,"void riscv_legitimize_const_move(rtx dest, rtx src) { if (!immediate_operand (src, QImode)) return; gcc_assert (!move_operand (src, VOIDmode)); rtx new_insn = gen_rtx_SET (VOIDmode, dest, src); riscv_state.riscv = 0; riscv_state.const_value = 0; emit_insn (new_insn); }" GCC,riscv,294,"void riscv_legitimize_move(rtx dest, rtx src) { if (! set_destination (dest, src) emit_insn (gen_rtx_SET (VOIDmode, dest, src)); else { if (! riscv_validate_move (dest, src)) { rtx new_dest, new_src; riscv_get_equivalent_move (dest, src, new_dest, new_src); emit_insn (gen_rtx_SET (VOIDmode, new_dest, new_src)); } } }" GCC,riscv,295,"riscv_load_store_insns (rtx insn, rtx mem) { int num_insns = 0; enum rtx_code code = GET_CODE (insn); if (code == LOAD) { rtx addr_mode = XEXP (XEXP (insn, 0), 0); if (GET_CODE (addr_mode) == CONST_INT) { int riscv = riscv_md_const_int (INSN, 'r'); int bits = riscv_md_const_int (INSN, 'b'); int radix = riscv_md_const_int (INSN, 'x'); num_insns = calculate_num_insns (riscv, bits, radix); } }" GCC,riscv,296, GCC,riscv,297,"void riscv_move_integer(rtx dest, rtx value) { rtx temp = riscv_force_temporary(dest, 2, 0, 1); emit_move_insn(temp, value); emit_move_insn(dest, temp); }" GCC,riscv,298,"int riscv_option_override (unsigned int code, struct gcc_options *opts, size_t ct) { switch(code) { case 1: if (opts->x_mdiv && !RISC-V_SUBSUMES(%)) { fprintf(stderr, ""-mdiv requires -march to subsume the % extension\n""); return 1; }" GCC,riscv,299, GCC,riscv,300,"unsigned char riscv_output_move (rtx dest, rtx src) { unsigned char val; int dest_size, src_size; if (GET_CODE (src) == CONST_INT) { if ((dest_size = GET_MODE_SIZE (GET_MODE (dest))) < 8) { if (abs_int_val (INTVAL (src)) >= (1LL << (8 * dest_size - 1))) { val = GEN_INT (INTVAL (src) >> (8 * (8 - dest_size))); } else { val = src; } } else { val = src; } } else if (GET_CODE (src) == CONST_DOUBLE) { if (FP_REG_P (dest)) { val = GEN_INT (0); } else { rtx temp; val = NULL_RTX; switch (GET_MODE_SIZE (GET_MODE (dest))) { case 4: temp = gen_lowpart (SImode, src); val = force_operand (temp, dest); break; case 8: temp = const0_rtx; break; case 16: temp = gen_rtx_CONST_DOUBLE (GET_MODE (src), 0.0L); break; case 24: temp = gen_rtx_CONST_DOUBLE (GET_MODE (src), -0.0L); break; default: break; } instruct = rs6000_output_move_const (dest, temp); } } else { dest_size = GET_MODE_SIZE (GET_MODE (dest)); src_size = GET_MODE_SIZE (GET_MODE (src)); if (dest_size == src_size) { val = rs6000_output_move_lui_or_lla (dest, src); } else { val = NULL_RTX; } } return val; }" GCC,riscv,301,"riscv_cpu_info riscv_parse_cpu(const char *name) { int i; for(i = 0; i < NUM_RISCV_CPU_INFO_ENTRIES; i++) { if(strcmp(riscv_cpu_info_table[i].cpu_name, name) == 0) { return riscv_cpu_info_table[i]; } } riscv_cpu_info unknown_cpu_info; unknown_cpu_info.issuer_id = ""riscv""; unknown_cpu_info.vers_major = 0; unknown_cpu_info.vers_minor = 0; unknown_cpu_info.cpu_name = ""unknown cpu""; unknown_cpu_info.target_specific_value = ""unknown cpu %qs for -mtune""; return unknown_cpu_info; }" GCC,riscv,302,"int riscv_pass_aggregate_in_fpr_and_gpr_p(tree type, tree *fields) { if (!TREE_CODE_IS(type, RECORD_TYPE)) { return 0; } if (TYPE_RISCV_AND_VAL(type) != TYPE_RISCV_AND_VAL( 
TYPE_MAIN_VARIANT(TREE_TYPE(type)))) { return 0; } if (TYPE_NFIELDS(type) < 2) { return 0; }" GCC,riscv,303,"riscv_pass_aggregate_in_fpr_pair_p (tree type, tree *fields) { if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE) return 0; if (!TYPE_HAS_FIELDS (type)) return 0; *fields = NULL; int num_fields = TYPE_FIELDS (type); *fields = (tree) calloc (num_fields, sizeof (tree)); if (*fields == NULL) return 0; if (riscv_pass_aggregate_fits_in_fpr_pair (type)) { int i; for (i = 0; i < num_fields; i++) { (*fields)[i] = TYPE_FIELDS (type)[i]; } return 1; } else { free (*fields); *fields = NULL; return 0; } } int riscv_pass_aggregate_fits_in_fpr_pair (tree type) { if (TARGET_RISCV && get_fields_riscv (type) == 2 && get_array_fields_riscv (type) == 0 && get_union_handler_riscv (type) == 0 && get_padding_riscv (type) == 0 && get_incoming_args_riscv (type) == 0) return 1; return 0; }" GCC,riscv,304," bool as_return, bool result_in_memory) { rtx mode = TYPE_MODE (type); if (type_biggest_mode (type) > Pmode) return true; if (GET_CLASS_NROWS (GET_MODE_ALIGNMENT (mode)) > 1) return as_return; if (GET_MODE_SIZE (mode) < riscv_0_2) return true; return default_pass_by_reference (type, decl, as_return, result_in_memory); }" GCC,riscv,305," rtx offset2, rtx mode2) { rtx_pair pair = gen_rtreg_pair (regno, REGNO + 1, mode); rtx v1, v2; v1 = gen_rtx_MEM (mode, gen_rtx_REG (mode1, offset1)); v2 = gen_rtx_MEM (mode, gen_rtx_REG (mode2, offset2)); return gen_lowpart (mode, gen_rtx_PAIR (pair, v1, v2)); }" GCC,riscv,306,"riscv_pass_fpr_single (enum machine_mode value_mode, enum machine_mode type_mode) { rtx arg; if (type_mode == BLKmode && value_mode == SFmode) { return gen_rtx_REG (SFmode, riscv_fprstart + riscv_struct_float_reg); } else { return gen_rtx_REG (value_mode, riscv_fprstart + riscv_single_fpr_offset (value_mode)); } }" GCC,riscv,307,int riscv_pass_mode_in_fpr_p (enum machine_mode mode) { switch (mode) { case SFmode: case DFmode: return (mode == SFmode) ? 
1 : 2; default: return 0; } } GCC,riscv,308,"void riscv_print_operand(FILE *file, rtx op, int code) { switch (code) { case 'h': fprintf (file, ""%d"", RISC_CODE(op)); break; case 'R': fprintf (file, ""%d"", RISC_CODE(op)); break; case 'C': switch (RISC_CODE(op)) { case RISCV_ICMP_EQ: fprintf (file, ""beq .%s"", XSTR(COND, 0)); break; case RISCV_ICMP_NE: fprintf (file, ""bne .%s"", XSTR(COND, 0)); break; case RISCV_ICMP_LT: fprintf (file, ""blt .%s"", XSTR(COND, 0)); break; case RISCV_ICMP_GE: fprintf (file, ""bge .%s"", XSTR(COND, 0)); break; case RISCV_ICMP_LE: fprintf (file, ""ble .%s"", XSTR(COND, 0)); break; case RISCV_ICMP_GT: fprintf (file, ""bgt .%s"", XSTR(COND, 0)); break; default: error(""%s"", ""invalid use of '%%%c'"", XSTR(COND,0)); break; }" GCC,riscv,309,"void riscv_print_operand_reloc(char* OP, int CONTEXT, char* HI_RELOC, char* riscv) { if (strcmp(HI_RELOC, ""%hi"") == 0) { printf(""%hi(%s)"", HI_RELOC, OP); } else if (strcmp(HI_RELOC, ""%lo"") == 0) { if (CONTEXT == HIGH) { printf(""%lo(%s)"", HI_RELOC, OP); } else { printf(""%lo(%s)(%pcrel)"", HI_RELOC, OP); } }" GCC,riscv,310,"int riscv_register_move_cost (const struct riscv_target *target, int n, int mode) { int cost = 0; if (n == 8 && mode == MOV_COST_N_N) { cost = 2; } return cost; }" GCC,riscv,311,"riscv_regno_mode_ok_for_base_p (unsigned int regno, machine_mode mode, bool strict_p) { if (strict_p) { if (!REG_OK_STRICT_P (regno)) return false; } else { if (!REG_OK_NONSTRICT_P (regno)) return false; } rtx r = gen_rtx_REG (mode, regno); if (!TARGET_MODE_OK_FOR_BASE_P (mode, r)) return false; if (!REG_MODE_OK_FOR_BASE_P (mode, r)) return false; return true; }" GCC,riscv,312,"void riscv_restore_reg (int reg, void* mem) { asm volatile ( ""lw %0, %1\n"" : ""=r"" (reg) : ""m"" (mem) : ); }" GCC,riscv,313,riscv_return_in_memory (const_tree type) { if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) return true; enum machine_mode mode = TYPE_MODE (type); if (mode != VOIDmode && GET_MODE_SIZE (mode) <= Pmode) return false; if (mode != VOIDmode && GET_MODE_SIZE (mode) > Pmode) { if (TARGET_64BIT) return true; else return false; } return false; } GCC,riscv,314,"bool riscv_save_reg_p (enum rtx_code REGNO) { } Explanation: - This function takes in an enumeration value of the register (REGNO) as a parameter. - The enum rtx_code is a set of predefined enumeration values used to represent different registers in the riscv architecture. - The function body includes the logic to determine whether the register needs to be saved or not. - The function returns a boolean value of true if the register needs to be saved, and false otherwise. Target-specific value: The riscv backend has its specific set of registers that need to be saved in the current function. Hence, the target-specific value for this function is ""riscv"". This target-specific value is used to identify the specific architecture and determine the registers that need to be saved. 
Sample implementation: The following is a sample implementation of the riscv_save_reg_p() function: bool riscv_save_reg_p (enum rtx_code REGNO) { switch (REGNO) { case R0: case R1: case R2: case R3: case FP: case T0: case T1: case T2: case RA: return true; default: return false; } }" GCC,riscv,315,"void riscv_save_restore_reg (int regno, int mode, int offset) { asm volatile (""fn %0, %1($sp)"" : : ""r""(regno), ""i""(offset) : ""memory""); }" GCC,riscv,316,"void riscv_setup_incoming_varargs (struct riscv_frame_info *frame, int register_args, int float_align, int int_align) { int regno; int arg_padding; int argnum; frame->gpr_save_area = frame->gpr_arg_area + register_args * sizeof(unsigned long); for (regno = 8, argnum = 0; regno < TARGET_FIRST_ARG_REGNUM; regno++, argnum++) { frame->gpr_save_area[i] = frame->gpr_arg_area[i]; } argnum = 0; arg_padding = 0; for (regno = TARGET_FIRST_ARG_REGNUM; regno <= TARGET_LAST_ARG_REGNUM; regno++, argnum++) { if (argnum >= register_args) { arg_padding = float_align; }" GCC,riscv,317, GCC,riscv,318,"riscv_split_symbol (rtx addr, enum machine_mode mode, rtx *low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if (legitimize_set_src_p (addr, GET_MODE (addr))) { rtx high, low; low = legitimize_set_src (addr, GET_MODE (addr)); high = gen_lowpart (GET_MODE (addr), addr); if (low_out) *low_out = low; emit_insn (gen_rtx_SET (VOIDmode, temp, high)); return true; } }" GCC,riscv,319, GCC,riscv,320,"riscv_unspec_offset_high(Host_Wide_Int offset, HOST_WIDE_INT symbol_type, Expr *base, Expr *temp) { if ((offset & 0xFFFFFFFF00000000ull) != 0) { Expr *high_part = riscv_unspec_address(offset >> 32, symbol_type); return build2(EXPR_LIST, high_part, base); } else { return base; } }" GCC,riscv,321,"bool riscv_use_save_libcall(int riscv, int value) { return (riscv == riscv && value == 0); }" GCC,riscv,322,"riscv_valid_base_register_p(rtx x, machine_mode mode, bool strict_p) { rtx decl; int regno; if (strict_p && !REG_OK_STRICT) { if (GET_CODE(x) != REG || !REG_P(x) || !(REGNO(x) >= 0 && REGNO(x) <= 31)) return false; else return true; }" GCC,riscv,323,"bool riscv_valid_lo_sum_p(int mode, int sym_type) { if (mode == riscv) { if (sym_type == 0) { return true; } } return false; } int main() { if (riscv_valid_lo_sum_p(riscv, 0)) { printf(""LO_SUM can address any value for riscv architecture\n""); } else { printf(""LO_SUM cannot address any value for riscv architecture\n""); } return 0; }" GCC,riscv,324,"bool riscv_valid_offset_p(enum machine_mode mode) { if (!RISCV_MODE_SUPPORTED(mode)) { return false; } enum reg_class base_reg_class = RISCV_ADDR_BASE_REGS(mode); for (unsigned int i = 0; i < GET_NUM_BASE_REGISTERS(base_reg_class); i++) { rtx base_reg = gen_rtx_REG(base_reg_class, i); rtx offset = gen_rtx_CONST_INT(mode, X); rtx addr = gen_rtx_PLUS(mode, base_reg, offset); if (!REG_ADDR_VALID(addr)) { return false; } } return true; }" GCC,riscv,325,"int riscv_address_insns(enum machine_mode mode, rtx x, bool might_split_p) { if (!TARGET_RISCV) return 0; if (!legitimate_address_p (mode, x, 1)) return 0; if (might_split_p) { int bits_per_word = BITS_PER_WORD; int words_needed = (GET_MODE_BITSIZE (mode) + bits_per_word - 1) / bits_per_word; if (!CONSTANT_P (x) && !MEM_P (x) && !SYMBOLIC_CONST_ADDRESS_P (x)) return 0; if (REG_P (x) || REG_OFFSET_P (x)) return 1; return words_needed; } else { return 1; } }" GCC,riscv,326,"riscv_arg_partial_bytes (enum machine_mode mode, int target_flag) { if (mode == BLKmode) return 0; if (target_flag == 0) { if (mode == CCmode) return 4; if (mode 
== ATTR_TYPE_ORIG (mode)) mode = TYPE_MODE (mode); if (mode == SImode || mode == UIView mode) return 4; if (mode == DImode || mode == UImode || mode == TFmode) return 8; } return 0; }" GCC,riscv,327,"void riscv_block_move_straight(char* src, char* dest, int length) { int i; for(i=0;i> i) & 1 != target_values[i]) { return false; } } return true; }" GCC,riscv,332,"void riscv_file_start(void) { fprintf(FILE, ""\t.option %spic\n"", TARGET_CPU_DEFAULT); fprintf(FILE, ""\t"",""\t.option norelax\n"",); }" GCC,riscv,333,"void riscv_for_each_saved_reg (rtx fn, rtx sp_offset) { const char *target = riscv_target_string; int i, num_regs; const rtx *regs; regs = riscv_initial_eps->saved_regs; num_regs = riscv_initial_eps->num_saved_regs; for (i = 0; i < num_regs; i++) { int regno = REGNO (regs[i]); if (regno == -1) continue; call_rtx_fn (fn, sp_offset, regno, target); } }" GCC,riscv,334, GCC,riscv,335,void riscv_function_arg_advance(riscv_arg r) { int size = riscv_arg_size(r); __builtin_gprms_offset += size; return; } GCC,riscv,336,riscv_function_ok_for_sibcall (const_tree fn) { if (!TREE_CODE (fn) == FUNCTION_DECL) return false; if (TREE_THIS_VOLATILE (fn) || DECL_TOTAL_RESTRICT (fn) || DECL_EXTERNAL (fn) || DECL_ASSEMBLER_NAME_SET_P (fn)) return false; return true; } GCC,riscv,337, GCC,riscv,338, GCC,riscv,339,"riscv_legitimize_const_move (rtx dest, rtx src) { if (GET_CODE (src) != CONST || !REG_P (dest)) return; if (!immediate_operand (src, GET_MODE (dest)) || move_operand (src, GET_MODE (dest))) return; rtx target = GEN_INT (0); rtx insn = gen_rtx_SET (VOIDmode, dest, gen_rtx_CONST (GET_MODE (dest), target)); emit_insn (insn); }" GCC,riscv,340,"void riscv_legitimize_move(rtx set_dest, rtx src) { if (!legitimate_move_p(set_dest, src)) { rtx_seq = gen_rtx_SET(set_dest, src); rtx_seq->target = gen_rtx_RISC0(0, 0, 0, 0, 0, 0); emit_insn(rtx_seq); } }" GCC,riscv,341,"void riscv_move_integer(riscv_register DEST, int VALUE, riscv_register TEMP) { if (VALUE >= 0 && VALUE <= 31) { printf(""mv %s, %d"", DEST, VALUE); } else { if (VALUE <= 2047 && VALUE >= -2048) { printf(""li %s, %d"", TEMP, VALUE); printf(""mv %s, %s"", DEST, TEMP); } else { printf(""lui %s, %d"", TEMP, (VALUE >> 12) & 0xFFFF); printf(""addi %s, %s, %d"", DEST, TEMP, VALUE & 0xFFF); } } }" GCC,riscv,342," const char *arg) { switch (code) { case 0: modify_target_options (""-mdiv requires -march to subsume the % extension"", arg); break; case 1: modify_target_options (""requested ABI requires -march to subsume the %qc extension"", arg); break; case 2: modify_target_options (""ABI requires -march=rv%d"", arg); break; case 3: modify_target_options(""-mpreferred-stack-boundary=%d must be between %d and %d"", arg); break; default: break; } }" GCC,riscv,343,"riscv_output_move (rtx src, rtx dest) { enum machine_mode dest_mode = GET_MODE (dest); enum machine_mode src_mode = GET_MODE (src); rtx insn = NULL_RTX; rtx tmp_rtx; switch (dest_mode) { case MODE_QImode: return gen_rtx_fmt_ee (VOIDmode, ""lbu\t%0,%1"", dest, src); case MODE_HImode: return gen_rtx_fmt_ee (VOIDmode, ""lhu\t%0,%1"", dest, src); case MODE_SImode: return gen_rtx_fmt_ee (VOIDmode, ""lw\t%0,%1"", dest, src); case MODE_DImode: return gen_rtx_fmt_ee (VOIDmode, ""ld\t%0,%1"", dest, src); case MODE_TI: if (GET_CODE (src) == CONST_INT) { rtx high = rtx_const_hi (src); rtx low = rtx_const_lo (src); if (high == 0) return gen_rtx_fmt_i (VOIDmode, ""li\t%0,#%l1\t%w1"", dest, low); else return gen_rtx_fmt_i (VOIDmode, ""lui\t%0,#%h1"", dest, high); } if (GET_CODE (src) == CONST_DOUBLE) return 
gen_rtx_fmt_ie (VOIDmode, ""#lfmv.x.d\t%0,%1"", dest, src); gcc_assert (GET_MODE_PRECISION (src_mode) == 64); break; case MODE_TF: switch (src_mode) { case MODE_TI: tmp_rtx = gen_rt_floating ((HOST_WIDE_INT) XWINT (XINT (src, 1))); return gen_rtx_fmt_ie (VOIDmode, ""#fli.w\t%0,x0"", dest, tmp_rtx); case MODE_TF: return gen_rtx_fmt_ie (VOIDmode, ""#fli.w\t%0,%1"", dest, src); }" GCC,riscv,344,"void riscv_pass_by_reference (int riscv, int zero, int two) { TARGET_PASS_BY_REFERENCE = riscv, zero, two; }" GCC,riscv,345,"tree_operand riscv_pass_fpr_single(tree_operand arg, int fpr_reg) { if (TREE_CODE(arg) != INTEGER_CST || TREE_CODE(arg) != ADDR_EXPR) { return NULL; } tree type = TREE_TYPE(arg); if (TREE_CODE(type) == RECORD_TYPE && TYPE_ATTRIBUTES(type) && attr_packed_p(TYPE_ATTRIBUTES(type))) { int regno = arg->operand; union tree_node temp; memset (&temp, 0, sizeof temp); temp.int_cst = regno; if (temp.gt_type.mode == BLKmode) { return arg; } } if (temp_value->type.mode == SFmode) { return arg; }" GCC,riscv,346,"riscv_setup_incoming_varargs (CUMULATIVE_ARGS *cum, tree fntype, tree functype ATTRIBUTE_UNUSED) { if (!TYPE_HAS_VARARGS (fntype)) return; unsigned int total_size = 0; tree arg_type = TYPE_ARG_TYPES (fntype); while (!IS_NULL_TREE (arg_type)) { total_size += tree_to_uhwi (TYPE_SIZE (arg_type)); arg_type = TREE_CHAIN (arg_type); } cum->incoming_varargs_offset = 0; cum->incoming_varargs_size = total_size; cum->incoming_varargs_alignment = 1; cum->target_args.parm_info.type = RS_CUM_ARGS_INCOMING_VARARGS; cum->target_args.parm_info.type = 'r'; cum->target_args.parm_info.size = 1; cum->target_args.parm_info.offset = 0; }" GCC,riscv,347, GCC,riscv,348,"void riscv_split_integer(long long int input, int *output) { output[0] = input >> 32; output[1] = input & 0xFFFFFFFF; output[2] = (input >> 32) & 0xFFFF; output[3] = input & 0xFFFF; input = (output[0] << 32) | output[1] | (output[2] << 32) | output[3]; }" GCC,riscv,349,"bool riscv_split_symbol(machine_mode mode, rtx addr, rtx *low_out, rtx temp) { if (mode == MAX_MACHINE_MODE && GET_CODE(addr) == MEM && XEXP(addr, 0) == gen_rtx_CONST(mode, GEN_INT_(0))) { rtx hi_part = gen_highpart(mode, gen_rtx_REG(mode, temp)); emit_move_insn(hi_part, XEXP(addr, 1)); if (low_out != NULL) { *low_out = gen_lowpart(mode, gen_rtx_REG(mode, temp)); } return true; } else if (GET_CODE(addr) == LABEL_REF && XINT(addr, 1) == 0 && !riscv_force_temporary(addr)) { if (low_out != NULL) { *low_out = addr; } return true; } return false; }" GCC,riscv,350,"bool riscv_valid_lo_sum_p (mode_t mode, symbol_type_t sym_type) { if (TARGET_RISCV) { if (mode == RISCV) { if (sym_type == SYM_TYPE) { return true; } } } return false; }" GCC,riscv,351,"int riscv_address_insns (machine_mode mode, rtx x, bool might_split_p) { int insns = 0; rtx addr = x; rtx inner_addr; if (! 
rtx_addr_can_believe_p (x, mode)) return 0; if (!rtx_memory_addr_operand (x, mode)) return 0; if (GET_CODE (x) == REG) return 1; if (GET_CODE (x) == PLUS) { inner_addr = XEXP (x, 1); mode = GET_MODE (inner_addr); if (GET_CODE (inner_addr) == REG) return 1; if (!rtx_addr_can_believe_p (inner_addr, mode)) return 0; if (might_split_p) { if (rtx_memory_addr_operand (inner_addr, mode)) { int num_words = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; insns = num_words * riscv_address_insns (mode, inner_addr, false); }" GCC,riscv,352,function riscv_epilogue_uses(riscv) { if (target_specific_value == 'riscv') { for each instruction in function { if (instruction is epilogue instruction) { return true; } } } return false; } GCC,riscv,353,riscv_save_reg_p (int regno) { bool riscv_save_reg = false; if (regno == RISCV_RETURN_REG) { riscv_save_reg = true; } else if (regno >= RISCV_INTERRUPTIBLE_MIN && regno <= RISCV_INTERRUPTIBLE_MAX) { riscv_save_reg = true; } else if (regno == RISCV_FRAME_POINTER_REG || regno == RISCV_GLOBAL_POINTER_REG || regno == RISCV_STACK_POINTER_REG) { riscv_save_reg = true; } return riscv_save_reg; } GCC,riscv,354,"bool riscv_split_symbol(enum machine_mode mode, rtx addr, rtx *low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if (GET_CODE(addr) == CONST_INT && INTVAL(addr) >= 0) { rtx high, low; high = riscv_force_temporary(mode, temp); low = GEN_INT(INTVAL(addr) & ((1 << 16) - 1)); if (low_out != NULL) *low_out = low; emit_move_insn(high, GEN_INT(INTVAL(addr) >> 16)); return true; } else return false; } else { if (GET_CODE(addr) == MEM && GET_MODE(addr) == mode) { extract_low_high_parts(addr, low_out, temp); return true; } else return false; } }" GCC,riscv,355,"riscv_address_cost (rtx x, enum rtx_code code) { unsigned int cost = 0; unsigned int multiplier = 1; switch (x,1) { case riscv: multiplier = 1; break; } switch (code) { case LOAD: cost = 3 * multiplier; break; case STORE: cost = 5 * multiplier; break; } return cost; }" GCC,riscv,356,"riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p) { int insn_count = 0; unsigned int op_size = GET_MODE_SIZE (mode); unsigned int addr_mode = GET_MODE (x); if ((mode != DImode && mode != SImode && mode != HImode && mode != QImode) || addr_mode != SImode) return 0; if (might_split_p) { for (unsigned int i = 0; i < op_size; i += UNITS_PER_WORD) ++insn_count; } else { ++insn_count; } return insn_count; }" GCC,riscv,357,"riscv_add_offset (rtx reg, rtx offset, rtx temp, enum rtx_code riscv) { if (GET_CODE (offset) == CONST_INT && INTVAL (offset) >= -2048 && INTVAL (offset) <= 2047) return gen_rtx_PLUS (GET_MODE (reg), reg, offset); if (riscv_force_temporary (offset, temp, riscv)) return gen_rtx_PLUS (GET_MODE (reg), reg, temp); }" GCC,riscv,358,"void riscv_adjust_block_mem(int riscv, void *mem, int length, int *loop_reg, void **loop_mem) { int loop_reg_index = riscv_alloc_gr(); *loop_reg = loop_reg_index; riscv_emit_loadi(loop_reg_index, (uint64)mem); *loop_mem = riscv_alloc_mr(); riscv_emit_memref(*loop_mem, loop_reg_index, length, 0, RISCV_LOAD); }" GCC,riscv,359,void riscv_allocate_stack_slots_for_args (CUMULATIVE_ARGS *cum) { int num_args = cum->num_args; int stack_words; if (cum->caller_align < STACK_ALIGN) cum->caller_align = STACK_ALIGN; stack_words = (cum->caller_size + (STACK_UNIT_SIZE - 1)) / STACK_UNIT_SIZE; int num_stack_slots = (num_args + 2) / 2; num_stack_slots = ((num_stack_slots + 7) / 8) * 8; int stack_offset = (-num_stack_slots) * STACK_UNIT_SIZE; stack_offset = ((stack_offset + 
cum->caller_align - 1) / cum->caller_align) * cum->caller_align; cum->caller_size = stack_offset + (stack_words * STACK_UNIT_SIZE); cum->caller_offset = stack_offset; cum->accumulate = stack_words != 0; } GCC,riscv,360,"riscv_arg_partial_bytes(riscv,0) { partial_arg_size = riscv,0 * partial_arg_type_size; partial_arg_size_bytes = partial_arg_size * 8; return partial_arg_size_bytes; }" GCC,riscv,361,"target_ulong riscv_asan_shadow_offset(enum target_machine machine) { switch (machine) { case riscv: return 29 << 12; default: fprintf(stderr, ""Unsupported architecture\n""); return 0; } }" GCC,riscv,362, GCC,riscv,363,"void riscv_block_move_loop(void *dest, const void *src, size_t length, size_t bytes_per_iter) { const char *src_bytes = (const char *)src; char *dest_bytes = (char *)dest; if (dest >= src && dest < src + length) { return; } for (size_t i = 0; i < length; i += bytes_per_iter) { for (size_t j = 0; j < bytes_per_iter; j++) { *dest_bytes = *src_bytes; src_bytes++; dest_bytes++; } } }" GCC,riscv,364,"void riscv_block_move_straight(char *dest, char *src, int length) { asm volatile ( ""li t0, 0\n\t"" ""la t1, %0\n\t"" ""la t2, %1\n\t"" : : ""r"" (src), ""r"" (dest) : ""%t0"", ""%t1"", ""%t2"" ); asm volatile ( ""loop: \n\t"" ""lb t3, 0(t1)\n\t"" ""sb t3, 0(t2)\n\t"" ""addi t1, t1, 1\n\t"" ""addi t2, t2, 1\n\t"" ""addi t0, t0, 1\n\t"" ""blt t0, %0, loop\n\t"" : : ""r"" (length) : ""%t0"", ""%t1"", ""%t2"", ""%t3"" ); }" GCC,riscv,365,"int riscv_build_integer(rtl_operand_t value, rtx_insn *codes) { int num_operations = 0; rtx_insn *curr_insn = codes; rtx_insn *temp_insn = emit_move_insn(gen_rtx_REG(RT_INT, 0), value); curr_insn = gen_rtx_LABEL_REF(delta_label_ptr(), temp_insn); emit_if_then_else(temp_insn, NULL_RTX, NULL_RTX, temp_insn); num_operations += 2; curr_insn = emit_library_call(cfun->function_library_code(""atoi""), 1, gen_rtx_REG(RT_INT, 0)); num_operations += 1; curr_insn = emit_move_insn(value, gen_rtx_REG(RT_INT, RISCV_INT_REGNUM)); num_operations += 1; for (int i = 0; i < 9; i++) { curr_insn = emit_move_insn(gen_rtx_REG(RT_INT, RISCV_INT_REGNUM + i + 1), gen_rtx_CONST_INT(1)); num_operations += 1; } return num_operations; }" GCC,riscv,366,"riscv_call_tls_get_addr (const char *sym, enum tls_symbol_type type, RTX result) { RTX tls_desc, tls_addr, func, insns; int target_index; target_index = TARGET_TLS_GET_ADDR_VALUE; const char *name = ""__tls_get_addr""; func = gen_rtx_SYMBOL_REF (Pmode, name); tls_desc = gen_rtx_TLS_DESC (Pmode, func, target_index); tls_addr = gen_rtx_PLUS (Pmode, tls_desc, GEN_INT (0)); switch (type) { case tls_type_global_dynamic: insns = gen_rtx_CALL (VOIDmode, tls_addr); break; case tls_type_local_dynamic: insns = gen_rtx_CALL (VOIDmode, tls_addr); break; default: gcc_unreachable (""unsupported TLS symbol type""); } insns = gen_rtx_SET (VOIDmode, result, insns); return insns; }" GCC,riscv,367,"bool riscv_cannot_copy_insn_p(const rtx_insn *insn) { bool cannot_copy = false; FOR_EACH_OPERAND(insn, op, i, 0) { if (CONSTANT_ADDRESS_P(XEXP(op, 0))) { cannot_copy = true; break; } } return cannot_copy; } " GCC,riscv,368,"bool riscv_cannot_force_const_mem(target, 0) { if (target == 0) { return false; } else { return true; } }" GCC,riscv,369, GCC,riscv,370,"bool riscv_can_change_mode_class(int mode, int class) { if (TARGET_ARCH != ""riscv"") { return false; } if (!is_mode_supported(mode)) { return false; } if (has_priv_mode_switch()) { if (is_class_supported(class)) { return true; } else { return false; } } return false; }" GCC,riscv,371, GCC,riscv,372,int 
riscv_can_use_return_insn(void) { if (TARGET_RISCV) { return 1; } return 0; } GCC,riscv,373,"bool riscv_classify_address(unsigned int mode, rtx x, machine_mode *info, bool strict_p) { if (mode == TARGET_MACH_MODE && GET_CODE(x) == ADDRESS) { *info = TARGET_MACH_MODE; if (strict_p) { info->flags |= REG_OK_STRICT; } info->target_specific = ""riscv,0,1,0,1""; return true; } else { return false; } }" GCC,riscv,374,"char* riscv_classify_symbol(SYMBOL_REF s) { char* method = NULL; char* riscv_target = XSTR (s->riscv_target, 0); if (strcmp(riscv_target, ""RV32I"") == 0) { method = ""method for accessing symbols and labels in RV32I architecture""; } else if (strcmp(riscv_target, ""RV64I"") == 0) { method = ""method for accessing symbols and labels in RV64I architecture""; } else { printf(""Invalid/unsupported riscv_target value""); } return method; }" GCC,riscv,375,int riscv_classify_symbolic_expression(int X) { int base = X & 0xFF; if (base == 0) { return 0; } if (base == (base & 0xFFFF)) { return 1; } if (base == (base & 0xFFFFFFFF)) { return 2; } if (base == (base & 0xFFFFFFFFFFFFFFFF)) { return 3; } return 4; } GCC,riscv,376,"int riscv_class_max_nregs(char* target_arch) { if (strcmp(target_arch, ""riscv"") == 0) { return 0; } return 0; }" GCC,riscv,377, GCC,riscv,378,"if (riscv_compressed_lw_offset_p (immediate_val)) { printf (""The input value satisfies the condition.\n""); } else { printf (""The input value does not satisfy the condition.\n""); } Target-specific Value: riscv,3,0,0 Code: bool riscv_compressed_lw_offset_p (unsigned int value) { if ((value % 4) != 0) { return false; } unsigned int imm_val = value / 4; if (imm_val > 31) { return false; } return true; }" GCC,riscv,379, GCC,riscv,380, GCC,riscv,381,int riscv_constant_alignment (void) { return 4; } GCC,riscv,382,int riscv_const_insns(int X) { int masked = X & 0xFFF; if (masked == X) { return 1; } if (X >= -4096 && X <= 4095) { return 2; } if (X >= -524288 && X <= 524287) { return 3; } return 0; } GCC,riscv,383,"const char *riscv_elf_select_rtx_section(const char *name, int flags) { if (strcmp(name, "".rodata.cst"") != 0) return NULL; if (flags & FLAGS_ELF_SECTION_IS_SMALL) return "".s%s""; return NULL; }" GCC,riscv,384,"void riscv_emit_attribute (FILE *f, const char *fmt, ...) 
{ va_list args; va_start (args, fmt); fprintf (f, ""\t.attribute arch, \""%s\""\n"", DEFAULT_ARCH); fprintf (f, fmt, args); fprintf (f, ""\t.attribute unaligned_access, %d\n"", 1); fprintf (f, fmt, args); fprintf (f, ""\t.attribute stack_align, %d\n"", 8); va_end (args); }" GCC,riscv,385, GCC,riscv,386, GCC,riscv,387,"void riscv_emit_int_order_test(rtx cmp0, rtx cmp1, rtx target, rtx invert_ptr) { rtx code, cmp0_low, cmp0_high, cmp0_unsigned, cmp1_low, cmp1_high, cmp1_unsigned, cmp0_greater, cmp1_greater, cmp_equal, result, result_invert; rtx riscv,0 = CODE_FOR_riscv_order_test; code = gen_reg_rtx (CCmode); cmp0_low = gen_lowpart (CCmode, cmp0); cmp0_high = gen_highpart (CCmode, cmp0); cmp0_unsigned = gen_reg_rtx (CCmode); cmp1_low = gen_lowpart (CCmode, cmp1); cmp1_high = gen_highpart (CCmode, cmp1); cmp1_unsigned = gen_reg_rtx (CCmode); emit_cmp_insn (cmp0_low, cmp1_low); emit_cmp_insn (cmp0_high, cmp1_high); emit_movinsn_operand (cmp0_unsigned, cmp0_high, GEN_INT (1)); emit_movinsn_operand (cmp1_unsigned, cmp1_high, GEN_INT (1)); cmp0_greater = gen_rtx_CC (GT, CCmode, cmp0_unsigned, cmp1_unsigned); cmp1_greater = gen_rtx_CC (GT, CCmode, cmp1_unsigned, cmp0_unsigned); cmp_equal = gen_rtx_CC (EQ, CCmode, cmp0_unsigned, cmp1_unsigned); emit_cmp_insn (cmp0_greater, cmp1_greater); emit_cmp_insn (code, cmp_equal); if (invert_ptr) { result_invert = gen_rtx_CC (NOT, CCmode, code, NULL_RTX); result = gen_rtx_SET (VOIDmode, *invert_ptr, result_invert); } else { result = gen_rtx_SET (VOIDmode, target, code); }" GCC,riscv,388,"riscv_emit_move (rtx dest, rtx src) { rtx insn; if (!can_create_pseudo_p ()) { if (GET_CODE (dest) == CONST_INT && GET_CODE (src) == CONST_INT) emit_insn (gen_riscv_move_imm (dest, src)); else emit_insn (gen_riscv_move (dest, src)); } else emit_insn (gen_riscv_move (dest, src)); if (GET_CODE (dest) == CONST_INT && GET_CODE (src) == CONST_INT) { REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NOTE, src, REG_NOTES (insn)); REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NOTE, dest, REG_NOTES (insn)); REG_NOTE_KIND (insn) = REG_STR_VALID; } }" GCC,riscv,389, GCC,riscv,390, GCC,riscv,391, GCC,riscv,392, GCC,riscv,393,"riscv_expand_conditional_move (rtx op, rtx cons, rtx alt, rtx dest) { if (!(op && cons && alt && dest)) return 0; if (!RTX_EQUAL_P (GET_CODE (op), NE) && !RTX_EQUAL_P (GET_CODE (op), EQ) && !RTX_EQUAL_P (GET_CODE (op), GT) && !RTX_EQUAL_P (GET_CODE (op), GE) && !RTX_EQUAL_P (GET_CODE (op), LT) && !RTX_EQUAL_P (GET_CODE (op), LE)) return 0; if (!op_strict_low_p (XEXP (op, 0)) && !op_strict_low_p (XEXP (op, 1))) return 0; emit_insn (gen_cond_movsi (dest, cons, alt)); return 1; }" GCC,riscv,394, GCC,riscv,395,"riscv_expand_int_scc (rtx *operands, rtx target) { rtx op0 = operands[0]; rtx op1 = operands[1]; enum riscv_target_values riscv_target_value = riscv_target_flags.first; if (riscv_target_value != riscv) return NULL; if (!RTX_UINTP (op0) || !RTX_UINTP (op1)) return NULL; rtx comp_insn = gen_riscv_scc (target, op0, op1); return comp_insn; }" GCC,riscv,396, GCC,riscv,397, GCC,riscv,398,"int riscv_extend_cost(const char *riscv, int is_sign_extend, int src_size, int dst_size, int is_src_signed, int is_dst_signed) { if ((riscv == ""riscv"") && (is_sign_extend == 0 || is_sign_extend == 1) && (src_size == 1 || src_size == 2) && (dst_size == 1 || dst_size == 2) && (is_src_signed == 0 || is_src_signed == 1) && (is_dst_signed == 0 || is_dst_signed == 1)) { return 2; } else { printf(""Invalid parameters for riscv_extend_cost function""); return -1; } }" GCC,riscv,399,"void 
riscv_file_start (void) { fprintf (asm_out_file, ""\t.option %spic\n"", riscv); fprintf (asm_out_file, ""\t.option norelax\n""); fprintf (asm_out_file, ""\t.globl\t%s\n"", riscv); fprintf (asm_out_file, ""%s:\n"", riscv); fprintf (asm_out_file, ""\t.option %d\n"", riscv); }" GCC,riscv,400,"int riscv_first_stack_step(int size, int min_size, int frame, int sp_adjust, int callee_save_size) { int initial_alloc = size; if (size > min_size) { int callee_alloc = frame - callee_save_size; if (callee_alloc > 0) { initial_alloc += callee_alloc; } } int stack_adjust = initial_alloc % sp_adjust; if (stack_adjust != 0) { initial_alloc += (sp_adjust - stack_adjust); } return initial_alloc; }" GCC,riscv,401, GCC,riscv,402,"void riscv_force_address(rtx x, enum mode mode) { if (!valid_address_p(mode, x)) { int riscv = TARGET_RISCV; if (riscv == TARGET_RISCV32) { x = force_reg(mode, x); } else if (riscv == TARGET_RISCV64) { x = force_reg(mode, x); } else { error(""Unsupported RISC-V target specific value""); } } }" GCC,riscv,403,"mode riscv_force_binary (mode MODE, rtx code, rtx X, rtx Y) { rtx result_reg = gen_reg_rtx (MODE); rtx and_insn; rtx tmp_reg; and_insn = gen_rtx_SET (VOIDmode, result_reg, gen_rtx_AND (MODE, X, Y)); emit_insn (and_insn); return result_reg; }" GCC,riscv,404,"static RTX_CODE riscv_force_temporary (rtx value) { rtx temp_reg; if (TARGET_NEW_PSEUDOS) { temp_reg = gen_rtx_REG (MODE_INT, gen_reg_rtx (Pmode)); emit_move_insn (temp_reg, value); } else { temp_reg = gen_rtx_REG (GET_MODE (value), DEST); emit_move_insn (temp_reg, value); } return temp_reg; }" GCC,riscv,405,"riscv_for_each_saved_reg(target, insn, fn, sp_offset) { for (int i = 0; i < saved_regs.length; i++) { int reg_offset = sp_offset + (i * size of register); insn = create_insn_store(loaded_regs[i], reg_offset); fn(loaded_regs[i], reg_offset); } }" GCC,riscv,406,"rtx riscv_frame_set (rtx reg, rtx mem) { rtx frame_rtx; mode (frame_rtx) = mode (reg); MEM (frame_rtx) = mem; TARGET (frame_rtx) = riscv,1; VALUE (frame_rtx) = reg; return frame_rtx; }" GCC,riscv,407,"riscv_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type, bool named) { tree t; if (named) { } else { } return t; }" GCC,riscv,408, GCC,riscv,409, GCC,riscv,410,"bool riscv_function_ok_for_sibcall(tree fndecl, void *target_specific_value) { if (fndecl == NULL) return false; if (TREE_VALUE_CHECK (fndecl, no_sibcall) || DECL_SIBLING_CALLS (fndecl) != 0) return false; if (TREE_VALUE_CHECK (fndecl, no_stack_protector)) return false; if (contains_nested_functions (fndecl) || contains_alloca_calls (fndecl)) return false; if (contains_non_local_goto (fndecl)) return false; return true; }" GCC,riscv,411,"riscv_function_value (enum machine_mode mode, tree valtype, rtx value) { rtx target_value = value; if (valtype) { if (GET_MODE (value) != mode) target_value = convert_modes (mode, GET_MODE (value), value); } else { if (riscv_target_values[1] == 0) return gen_rtx_VOIDmode (VOIDmode); else target_value = gen_rtx_TARGET_RETURN_VALUE (VOIDmode); } return target_value; }" GCC,riscv,412,"int riscv_gen_gpr_save_insn(rtx_insn *insn, rtx *operands) { rtx riscv = gen_rtx_REG (SImode, REG_RETVAL (1)); int operand1 = 2; int operand2 = 1; int operand3 = 0; rtx rtl1 = gen_rtx_REG (SImode, operand1); rtx rtl2 = gen_rtx_REG (SImode, operand2); rtx rtl3 = gen_rtx_REG (SImode, operand3); insn = emit_move_insn (riscv, rtl1); insn = emit_move_insn (rtl3, rtl2); operands[0] = insn; operands[1] = rtl1; operands[2] = rtl2; operands[3] = rtl3; return 4; }" 
GCC,riscv,413,"target_interrupt_type riscv_get_interrupt_type(const char *name) { if (strstr(name, ""interrupt"")) { return RISCV_INTERRUPT; } else if (strstr(name, ""user"")) { return RISCV_USER; } else if (strstr(name, ""supervisor"")) { return RISCV_SUPERVISOR; } else { return UNKNOWN_MODE; } }" GCC,riscv,414,"bool riscv_gpr_save_operation_p(rtx instruction) { rtx pattern = gen_rtx_RISC_V(0, valid_gpr_save_mode, const0_rtx, const0_rtx, const0_rtx, const0_rtx, const0_rtx, const0_rtx, const_double_int_rtx(1), gen_int_mode(3), gen_int_mode(1), gen_int_mode(1)); return pattern == instruction; }" GCC,riscv,415," tree args, int flags, bool * no_add_attrs) { tree attribute; for (attribute = args; attribute; attribute = TREE_CHAIN(attribute)) { tree value = TREE_VALUE(attribute); if (strcmp(TREE_STRING_POINTER(value), ""riscv"") != 0) continue; tree attr_value = TREE_VALUE(TREE_CHAIN(value)); if (strcmp(TREE_STRING_POINTER(attr_value), ""%qE"") == 0) { DECL_ATTRIBUTES(fndecl) = tree_cons(name, NULL_TREE, DECL_ATTRIBUTES(fndecl)); break; } } *no_add_attrs = true; }" GCC,riscv,416,"bool riscv_hard_regno_mode_ok (int regno, enum machine_mode mode) { if (regno >= 0 && regno <= 31) { if (mode == DImode || mode == SImode || mode == QImode) { return true; } }" GCC,riscv,417,"int riscv_hard_regno_nregs (HARD_REGNO, MODE) { if (HARD_REGNO == riscv && MODE == 1) return 1; else return false; }" GCC,riscv,418, GCC,riscv,419,"bool riscv_immediate_operand_p (rtx CODE, rtx X) { if (!riscv_opcode_p (CODE)) { return false; } if (!riscv_immediate_p (CODE)) { return false; } if (!riscv_immediate_constraint (CODE, X)) { return false; } return true; }" GCC,riscv,420,"uint32_t riscv_initial_elimination_offset(uint32_t FROM, uint32_t TO) { if (FROM == frame_pointer) { if (TO == stack_pointer) { return riscv, 0; } else if (TO == hard_frame_pointer) { return riscv, 0; } else { return 0; } } else { return 0; } }" GCC,riscv,421,"riscv_init_machine_status(target_specific_value) { if(target_specific_value != ""riscv"") { return ERROR; } else { machine_status_chunk = __builtin_malloc(RISCV_MACHINE_STATUS_SIZE); __builtin_memset(machine_status_chunk, 0, RISCV_MACHINE_STATUS_SIZE); return machine_status_chunk; } }" GCC,riscv,422,int riscv_integer_cost(int val) { int cost; if (val >= -2048 && val <= 2047) { cost = 1; } else if (val >= -524288 && val <= 524287) { cost = 2; } else { cost = 3; } return cost; } GCC,riscv,423,"bool riscv_interrupt_type_p(tree type) { return TYPE_HAS_TARGET_VALUES(type, riscv, ""interrupt""); }" GCC,riscv,424, GCC,riscv,425,"bool riscv_in_small_data_p(const_rtx exp) { if (!REG_P(exp) && !MEM_P(exp)) { return false; } rtx sdata_ref = gen_rtx_SYMBOL_REF (Pmode, "".sdata""); rtx sbss_ref = gen_rtx_SYMBOL_REF (Pmode, "".sbss""); if ((REG_P (exp) && REG_IN(rtx, sdata_ref)) || (MEM_P (exp) && MEM_IN(rtx, sdata_ref)) || (REG_P (exp) && REG_IN(rtx, sbss_ref)) || (MEM_P (exp) && MEM_IN(rtx, sbss_ref))) { return true; } return false; }" GCC,riscv,426,int riscv_issue_rate() { int max_issue_rate = GET_RISCV_ISSUE_RATE(); return max_issue_rate; } GCC,riscv,427,bool riscv_legitimate_address_p(address) { if (address < 0x00000000 || address > 0xffffffff) { return false; } if (address % 4 != 0) { return false; } return true; } GCC,riscv,428,bool riscv_legitimate_constant_p(unsigned long long int val) { if (val > (1ULL << 20) - 1) return false; if (val == (unsigned long long int)riscv) return true; else return false; } GCC,riscv,429,"riscv_legitimize_address (enum machine_mode mode, rtx x) { if ((mode == riscv) && 
(GET_CODE (x) == CONST_INT) && (INTVAL (x) == 1)) { rtx new_rtx = gen_rtx_CONST_INT (riscv, 1, 1, 0, 0, 1, ""shorten_memrefs"", 0); return new_rtx; } return NULL; }" GCC,riscv,430,"bool is_suitable_address(ADDR address){ } ADDR transform_address(ADDR address){ } ADDR riscv_legitimize_call_address(ADDR address){ if(is_suitable_address(address)){ return address; } else{ if(is_valid_address(address, ""riscv"")){ return transform_address(address); } else{ raise_error(""Invalid address for riscv architecture.""); } } }" GCC,riscv,431,"riscv_legitimize_const_move (rtx *operands) { rtx src = operands[0]; rtx dest = operands[1]; if (!immediate_operand (src, VOIDmode)) return false; if (move_operand (src, VOIDmode)) return false; rtx new_rtx = gen_rtx_CONST_INT (VOIDmode, GEN_INT (0)); emit_insn (gen_rtx_SET (VOIDmode, dest, new_rtx)); return true; }" GCC,riscv,432,"riscv_legitimize_move(DEST, SRC, riscv, 0, 0, 0) { if(DEST is not a register or SRC is not a register) { emit error; return; } if(DEST == SRC) { return; } emit ""move riscv, SRC""; emit ""move DEST, riscv""; return; }" GCC,riscv,433,"SET_SRC riscv_legitimize_tls_address(SYMBOL_REF loc) { SET_SRC address; address.loc = gen_rtx_REG (Pmode, regno); rtlo = gen_rtx_LO_SUM (Pmode, address.loc, GEN_INT (0)); address.loc = rtlo; return address; }" GCC,riscv,434,"riscv_load_store_insns (rtx_insn *insn, int riscv, int reg_width, int mem_type) { int insns = 0; enum rtx_code code = GET_CODE (insn); rtx reg = insn->src; rtx mem = insn->dest; switch (code) { case MEM: if (GET_CODE (reg) == REG) { if (riscv == 32 && reg_width == 64 && mem_type == 0) insns = 1; else insns = 0; } else insns = 0; break; case COMPARE: if (GET_CODE (mem) == MEM) { if (riscv == 64 && reg_width == 32 && mem_type == 1) insns = 1; else insns = 0; } else insns = 0; break; default: insns = 0; } return insns; }" GCC,riscv,435,"riscv_memmodel_needs_amo_acquire (const char *model) { if (strcmp (model, ""riscv"") == 0) return true; else return false; }" GCC,riscv,436,bool riscv_memmodel_needs_release_fence(riscv_memory_model model) { switch (model) { case RISCV_SEQ_CST: case RISCV_TSO: case RISCV_SC: return true; case RISCV_RLX: case RISCV_RRA: return false; default: gcc_unreachable(); } } GCC,riscv,437, GCC,riscv,438, GCC,riscv,439,"int riscv_min_arithmetic_precision (const char *target) { if (strcmp (target, ""riscv,32"") != 0) { return -1; } return 32; }" GCC,riscv,440,"bool riscv_modes_tieable_p (int riscv, enum machine_mode mode1, enum machine_mode mode2) { return false; }" GCC,riscv,441,"void riscv_move_integer(int dest, int value, int temp, int mode) { if (mode != 1) { return; } __asm__ __volatile__( ""mv %0, %1"" : ""=r"" (temp) : ""r"" (value) ); __asm__ __volatile__( ""mv %0, %1"" : ""=r"" (dest) : ""r"" (temp) ); }" GCC,riscv,442,"bool riscv_naked_function_p (const_tree func) { if (TREE_CODE(func) != FUNCTION_DECL) return false; tree attrs = DECL_ATTRIBUTES (func); while (attrs != NULL) { tree attr = TREE_VALUE (attrs); if (TREE_CODE (attr) == TARGET_ATTRIBUTE) { const char *attr_name = IDENTIFIER_POINTER (IDENTIFIER_NODE (TREE_VALUE (attr))); if (strcmp (attr_name, ""naked"") == 0) return true; } attrs = TREE_CHAIN (attrs); } return false; } int main () { static int __attribute__((naked)) dummy_func (void) { return 0; } bool is_naked = riscv_naked_function_p (dummy_func); printf (""Is dummy_func a naked function? %s \n"", is_naked ? 
""Yes"" : ""No"" ); static int dummy_func2 (void) { return 0; } is_naked = riscv_naked_function_p (dummy_func2); printf (""Is dummy_func2 a naked function? %s \n"", is_naked ? ""Yes"" : ""No"" ); }" GCC,riscv,443, GCC,riscv,444, GCC,riscv,445,"riscv_output_gpr_save (rtx insn, int count) { rtx tmp, pat, p; pat = rtx_alloc (pat_code, """", insn); tmp = gen_rtx_REG (Pmode, T0_REG); pat = rtx_add_value (pat, tmp, 0); snprintf (tmp_op, tmp_op_size, ""call\t%s"", ""__riscv_save_%u""); tmp = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_CLOBBER (Pmode, tmp), gen_rtx_SYMBOL_REF (ptr_mode, cur_fix), gen_rtx_CONST_INT (Pmode, count), NULL_RTX, NULL_RTX); pat = rtx_add_value (pat, tmp, 1); return pat; }" GCC,riscv,446,"riscv_output_mi_thunk (FILE *file, rtx_insn *body, rtx_insn *epilogue) { rtx_insn *insn, *next; int last, expand_p = 0; int labelno = 0; fprintf (file, ""\t.type\t__riscv_output_mi_thunk, @function\n""); fprintf (file, ""__riscv_output_mi_thunk:\n""); insn = gen_rtx_CLOBBER (VOIDmode, stack_pointer_rtx); ix86_expand_insn (insn, 0, epilogue); insn = get_last_before_label (insn, 1); last = NOTE_INSN_FUNCTION_BEG (insn); while (NOTE_P (insn) && note_operand (insn) == NOTE_INSN_FUNCTION_BEG) insn = PREV_INSN (insn); do { next = NEXT_INSN (insn); if (INSN_CHAN (insn) != 0 || epilogue == NULL || NEXT_INSN (epilogue) == NULL) ix86_expand_insn (insn, 1, epilogue); else { if (INSN_CHAN (insn) == END_INSN) trace_outline (insn, epilogue); else insn = split_sequence (insn, next, &labelno); if (INSN_CHAN (insn) == END_INSN) break; } } while (insn = next); fprintf (file, ""\t.size\t__riscv_output_mi_thunk, .-__riscv_output_mi_thunk\n""); }" GCC,riscv,447,"riscv_output_move (rtx dest, rtx src) { enum rtx_code dest_code = GET_CODE (dest); enum rtx_code src_code = GET_CODE (src); enum machine_mode mode = GET_MODE (dest); switch (dest_code) { case REG: switch (src_code) { case REG: return gen_move_insn (dest, src); case CONST_INT: if (host_integerp (src, 1)) { if (BYTES_BIG_ENDIAN) return gen_rtx_fmt_d (move_optab, mode, dest, src); else return gen_rtx_fmt_w (move_optab, mode, dest, src); } else if (GET_MODE (dest) == SFmode) return gen_rtx_fmt_d (move_optab, mode, dest, gen_int_mode (GET_MODE (src), INTVAL (src))); else { if (S_1_INT) return gen_rtx_fmt_d (move_optab, mode, dest, XEXP (src, 0)); else return gen_rtx_fmt_d (move_optab, mode, dest, XEXP (src, S_1_INT)); }" GCC,riscv,448,"riscv_parse_cpu (const char *name) { if (name == NULL) return NULL; riscv_cpu_info cpu; if (strcmp (name, ""riscv"") == 0) { cpu->name = ""riscv""; cpu->arch = 0; cpu->tune = 0; cpu->desc = ""unknown cpu %qs for %<-mtune%>""; } else { return NULL; } return cpu; }" GCC,riscv,449, GCC,riscv,450, GCC,riscv,451,"riscv_pass_aggregate_in_fpr_pair_p (tree type, int *fields) { int result = -1; if (TYPE_STRUCT_P (type) && !TYPE_HAS_NONZERO_SIZE (type)) { if (struct_contains_bitfields (type)) { fields[0] = 2; fields[1] = 0; fields[2] = 1; fields[3] = 0; result = 1; } else { result = 0; } } return result; }" GCC,riscv,452,"int riscv_pass_by_reference(const char *riscv, int mode) { if (strcmp(riscv, ""riscv"") == 0) { if (mode == 0) { return 0; } else if (mode == 1) { return 1; } else if (mode == 2) { return 2; } } return -1; }" GCC,riscv,453,"void riscv_pass_fpr_pair(int regno, mode_t mode, int offset, mode_t mode1, int offset1, mode_t mode2, int offset2) { composite_t composite = calculate_composite(mode, offset, mode1, offset1, mode2, offset2); *(composite_fpr_pair*)regno = composite; return composite; } composite_t 
calculate_composite(mode_t mode, int offset, mode_t mode1, int offset1, mode_t mode2, int offset2) { size_t size = getSize(mode) + getSize(mode1) + getSize(mode2); uint8_t composite[size]; memcpy(&composite[offset], reg[optab[mode].operand_type][regno], getSize(mode)); memcpy(&composite[offset1], reg[optab[mode1].operand_type][regno + 1], getSize(mode1)); memcpy(&composite[offset2], reg[optab[mode2].operand_type][regno + 2], getSize(mode2)); return *(composite_t*)composite; } size_t getSize(mode_t mode) { return (size_t)optab[mode].size; }" GCC,riscv,454,"struct __attribute__ ((packed)) foo { float f; } where the SFmode value ""f"" is passed in REGNO but the struct itself has mode BLKmode. */ static int riscv_pass_fpr_single (enum machine_mode mode, tree type, int position, riscv_target target) { int fpr = -1; if (TYPE_STRUCT_PACKED(type)) { enum machine_mode element_mode = TYPE_MODE(TREE_TYPE(FIRST_FIELD(TYPE_FIELDS(type)))); return GET_FPR_FROM_MODE(element_mode, target); } if (mode == FLOAT_MODE || mode == DOUBLE_MODE || mode == LONG_DOUBLE_MODE) { fpr = GET_FPR_FROM_POSITION(position); } return fpr; }" GCC,riscv,455,int riscv_pass_mode_in_fpr_p(const_tree mode) { if (FLOAT_MODE_P(mode)) { if (TARGET_RISCV && !TARGET_SOFT_FLOAT && GET_MODE_CLASS(mode) == MODE_FLOAT) { return riscv ? 2 : 1; } return 0; } return 0; } GCC,riscv,456,"void riscv_print_operand (FILE *stream, rtx op, int code) { if (!op) { fprintf (stream, ""0""); return; } const char *prefix = """"; if (TARGET_BIG_ENDIAN) prefix = ""upper32(""; else if (code == 'h') code = 0; switch (code) { case 'h': fprintf (stream, ""%s%d"", prefix, REGNO (XEXP (op, 0)) + UNITS_PER_WORD / 2); break; case 'R': fprintf (stream, ""%dl"", REGNO (XEXP (op, 0))); break; case 'C': fprintf (stream, ""%sMULHU\n"", SIGNED (op) ? ""mulo"" : ""mulhi""); break; case 'A': fprintf (stream, ""%srl"", TARGET_ABI ? 
""cold"" : ""hot""); break; case 'F': if (TARGET_ABI) { if (TARGET_UNALIGNED_ACCESS) fprintf (stream, ""lr.inc""); else fprintf (stream, ""\n""); } break; case 'V': if (TARGET_ABI) fprintf (stream, ""%d%d"", REGNO (op), REGNO (XEXP (op, 1))); else fprintf (stream, ""%dk"", juliahome.HOME_POINTER_NAME); break; case 'Z': gcc_assert (XEXP (op, 0)); fprintf (stream, XEXP (op, 0)); break; case 'J': gcc_assert (op); fprintf (stream, ""%d"", CODE_FORA (MEM)).operands[3], XEXP (MEM, 0)); break; default: gcc_unreachable (); } }" GCC,riscv,457,"void riscv_print_operand_address(FILE *file, rtx x) { if (riscv) { fprintf(file, ""riscv (%s)"", XSTR(x, 0)); } else { fprintf(file, ""%.4s:(%s)"", riscv, XSTR(x, 0)); } }" GCC,riscv,458,"void riscv_print_operand_reloc (FILE *stream, rtx op, rtx context, int hi_reloc) { char *prefix = """"; char *suffix = """"; switch (hi_reloc) { case DM_HI_RELOC: prefix = ""%hi""; break; case DM_LO_RELOC: prefix = ""%lo""; break; case DM_PCREL_HI_RELOC: prefix = ""%pcrel_hi""; break; case DM_PCREL_LO_RELOC: prefix = ""%pcrel_lo""; break; case DM_TPREL_HI_RELOC: prefix = ""%tprel_hi""; break; case DM_TPREL_LO_RELOC: prefix = ""%tprel_lo""; break; default: suffix = ""invalid use of '%%%c'""; break; } if (GET_CODE (op) == SYMBOL_REF) { fprintf (stream, ""%s%s%s"", prefix, XSTR (op, 0), suffix); return; } if (GET_CODE (op) == HIGH || GET_CODE (op) == LO_SUM) { riscv_print_operand_reloc (stream, XEXP (op, 0), context, hi_reloc); fprintf (stream, ""%s"", suffix); return; } fprintf (stream, ""%s"", prefix); output_addr_const (stream, op, GET_MODE (op), context); fprintf (stream, ""%s"", suffix); }" GCC,riscv,459,"riscv_promote_function_mode (unsigned int mode, tree type) { if (type == NULL_TREE) { switch (mode) { case SImode: return DImode; case HImode: return SImode; case QImode: return HImode; case SFmode: return DFmode; case XCmode: return SDmode; case ZCmode: return XDmode; default: return mode; } } else { if (type == signed char_type_node) return SImode; else if (type == unsigned char_type_node) return HImode; else if (type == signed short_type_node) return SImode; else if (type == unsigned short_type_node) return HImode; else if (type == signed int_type_node) return SImode; else if (type == unsigned int_type_node) return HImode; else if (type == signed long_type_node) return DImode; else if (type == unsigned long_type_node) return DIQmode; else if (type == signed long long_type_node) return TIQmode; else if (type == unsigned long long_type_node) return TIDQmode; else if (type == float_type_node) return DFmode; else if (type == double_type_node) return XFmode; else if (type == long double_type_node) return TFSmode; else return mode; } }" GCC,riscv,460,"riscv_register_move_cost (unsigned int from, unsigned int to) { unsigned int cost = 0; if (REGNO_REG_CLASS (from) == REGNO_REG_CLASS (to)) { if (REGNO_NARROWEST_MODE (from) == 8 && REGNO_NARROWEST_MODE (to) == 8) { if (to - from == 1) cost = 2; } } return cost; }" GCC,riscv,461, GCC,riscv,462,"bool riscv_regno_mode_ok_for_base_p (int regno, machine_mode mode, bool strict_p){ enum riscv_reg_class base_regs[] = {RV_INT_BASE_REGS, RV_FLOAT_BASE_REGS, RV_CSR_BASE_REGS}; if (regno < 0 || regno >= NUM_RV_REGS_FOR_MODE(mode)) return false; if (strict_p && TARGET_REG_OK_STRICT) return true; for (unsigned int i = 0; i < sizeof(base_regs) / sizeof(enum riscv_reg_class); i++) { if (regno >= RISC_REGNO_REGCLASS_FIRST(base_regs[i]) && regno <= RISC_REGNO_REGCLASS_LAST(base_regs[i])) return true; } return false; }" GCC,riscv,463,"function 
riscv_reorg() { if (target_machine == ""riscv"") { } }" GCC,riscv,464,"void riscv_restore_reg(int reg, int mem) { __asm__ ( ""lw %0, %1\n"" : ""=r"" (reg) : ""m"" (mem), ""riscv,1""); }" GCC,riscv,465,riscv_return_addr (void) { RTX return_addr = GEN_INT (0); return return_addr; } GCC,riscv,466,"riscv_return_in_memory (machine_mode mode, rtx value) { if (TARGET_SIGNAL_0 == riscv,0) { rtx var = gen_rtx_REG (mode, 0); emit_move_insn (var, value); return var; } else { return value; } }" GCC,riscv,467, GCC,riscv,468, GCC,riscv,469,bool riscv_save_reg_p(int regno) { #if TARGET_RISCV if (regno >= X5 && regno <= X9) { return true; } #endif return false; } GCC,riscv,470,"void riscv_save_restore_reg(int regno, int mode, int offset) { if (mode == RISCVMODE) { asm volatile ("" sw %0, %1(%2) \n"" : : ""r"" (regno), ""r"" (offset), ""r"" (SP) : ""%memory"", ""cc"" ); asm volatile ("" lw %0, %1(%2) \n"" : ""=r"" (regno) : ""r"" (offset), ""r"" (SP) : ""%memory"", ""cc"" ); } else }" GCC,riscv,471, GCC,riscv,472,"tree decl; { if (TREE_CODE (decl) == FUNCTION_DECL) { ASM_OUTPUT_COMMON_SECTION_NAME (asm_out_file, "".text""); } else { ASM_OUTPUT_COMMON_SECTION_NAME (asm_out_file, "".srodata""); } }" GCC,riscv,473,for (param = args - 1; param >= 0; param--) { stack_pointer = stack_pointer - riscv_function_arg_size(arg_types[param]); [stack_pointer] = incoming_param; } GCC,riscv,474, GCC,riscv,475, GCC,riscv,476, GCC,riscv,477,"unsigned int riscv_shorten_lw_offset(unsigned int base, unsigned int offset) { unsigned int max_offset = 2047; unsigned int min_offset = -2048; if (offset >= min_offset && offset <= max_offset) { return base + offset; } else if (offset > max_offset) { unsigned int excess = offset - max_offset; return base + excess; } else if (offset < min_offset) { unsigned int excess = min_offset - offset; return base + excess; } }" GCC,riscv,478,"void riscv_slow_unaligned_access(void* addr, size_t size, bool write) { if(((size_t)addr) % ALIGNMENT != 0) { if(size % ALIGNMENT != 0) { } }" GCC,riscv,479, GCC,riscv,480,"int riscv_split_const_insns(rtx X) { rtx highword, lowword; int high_cnt, low_cnt; int total_cnt; if (X < 2 || X % 2 != 0) return 0; highword = gen_rtx_CONST_DOUBLE (VOIDmode, riscv_const_high, X); lowword = gen_rtx_CONST_DOUBLE (VOIDmode, riscv_const_low, X); high_cnt = (X + 1) / 2; low_cnt = X / 2; total_cnt = high_cnt + low_cnt; return total_cnt; }" GCC,riscv,481, GCC,riscv,482,int64_t riscv_split_integer(int64_t val){ int32_t part1 = (int32_t)(val >> 32); int32_t part2 = (int32_t)(val); int64_t result = ((uint64_t)part1 << 32) | ((uint32_t)part2); return result; } GCC,riscv,483,"int riscv_split_integer_cost (int riscv, int v, int bits, int align, int prefer) { } ``` The parameter ""v"" represents the integer value that needs to be constructed, while ""bits"" represents the size of the value in bits. The parameter ""align"" is used to determine the required alignment for the value, and ""prefer"" is a flag that indicates if a specific register should be preferred for constructing the value. Next, we need to define the cost calculation logic for constructing the integer value ""VAL"" in the event that a scratch register is available. 
``` int words = (bits + 31) / 32; if (prefer == 0) { return words * riscv; } else { return words * prefer; }" GCC,riscv,484,"bool riscv_split_symbol(rtx addr, machine_mode mode, rtx *low_out, rtx temp) { if (mode == MAX_MACHINE_MODE) { if ((GET_CODE(addr) == SYMBOL_REF || GET_CODE(addr) == CONST || GET_CODE(addr) == LABEL_REF) && ((SYMBOL_REF_LOCAL_P(addr) && SYMBOL_REF_THREAD_LOCAL_P(addr)) || (GET_CODE(addr) != SYMBOL_REF && get_attr(addr) == TARGET_THREAD))){ emit_move_insn(temp, addr); *low_out = gen_rtx_SYMBOL_REF(mode, XEXP(temp, 0)); return true; } } else { if (GET_CODE(addr) == CONST) { HOST_WIDE_INT high = CONST_DOUBLE_HIGH(addr); HOST_WIDE_INT low = CONST_DOUBLE_LOW(addr); *low_out = gen_int_mode(low, mode); return true; } } return false; }" GCC,riscv,485,"riscv_store_data_bypass_p (rtx in_insn, rtx out_insn) { if (!single_set_p (in_insn) || !single_set_p (out_insn) || !verify_parallel_clobbers_uses (in_insn) || !verify_parallel_clobbers_uses (out_insn)) return false; rtx out_vars = XEXP (out_insn, 0); rtx in_addr = XEXP (in_insn, 0); while (out_vars) { if (XEXP (out_vars, 0) == in_addr) return false; out_vars = XEXP (out_vars, 1); } if (GET_CODE (in_insn) == PARALLEL) { rtx tem; int i; for (i = 0; i < XVECLEN (in_insn, 0); i++) { tem = XVECEXP (in_insn, 0, i); if (GET_CODE (tem) == SET) { rtx in_src = SET_SRC (tem); if (MEM_P (in_src)) return true; } } } else { rtx in_src = SET_SRC (in_insn); if (MEM_P (in_src)) return true; } return false; }" GCC,riscv,486,"unsigned long long riscv_strip_unspec_address(unsigned long long OP, unsigned int riscv) { if (OP == UNSPEC_ADDRESS) { unsigned long long address = extract_address(OP, riscv); return address; } else { return OP; } } int main() { unsigned long long address = riscv_strip_unspec_address(UNSPEC_ADDRESS, riscv); printf(""Address: %llx\n"", address); unsigned long long operand = riscv_strip_unspec_address(0x12345678, riscv); printf(""Operand: %llx\n"", operand); return 0; }" GCC,riscv,487,"uint32_t riscv_subword(uint64_t op, bool high_p) { if (high_p) { return (uint32_t)(op >> 32); } else { return (uint32_t)op; } }" GCC,riscv,488,"bool riscv_symbolic_constant_p(rtx X, char *SYMBOL_TYPE) { bool is_symbolic = false; if (RTX_CODE (X) == SYMBOL_REF) { rtx this_sym = X; while (this_sym && SYMBOL_REF_LOCAL_P (this_sym)) { if (SYMBOL_REF_DECL (this_sym)) { if (strcmp (TARGET_VALUE, ""riscv,32"") == 0 && SYMBOL_REF_RISCV_SPEC_P (this_sym, ""RISC_V"")) { SYMBOL_TYPE = ""RISC_V""; is_symbolic = true; break; } } this_sym = before_next_symbolic_ref (this_sym); } } return is_symbolic; }" GCC,riscv,489,riscv_symbol_binds_local_p (tree x) { if (TREE_CODE (x) != SYMBOL_REF) return false; if (DECL_EXTERNAL (x)) return false; if (ELF_ST_BIND (DECL_ELF_TYPE (x)) == STB_LOCAL) return true; return false; } GCC,riscv,490,"bool riscv_tls_symbol_p (tree x) { if (TARGET_RISCV && XSTR (x, 0) == 'riscv,0') return true; return false; }" GCC,riscv,491, GCC,riscv,492,"riscv_unspec_address (rtx address, enum machine_mode mode, enum addr_space symbol_type) { rtx unspec_addr = gen_rtx_UNSPEC (Pmode, address, symbol_type, UNSPEC_RISCV); return unspec_addr; }" GCC,riscv,493,"riscv_unspec_address_offset (rtx symbol, int offset) { rtx unspec_address = gen_rtx_UNSPEC_SYMBOL (SYMBOL_TYPE, symbol); unspec_address = plus_constant (unspec_address, offset); set SYMBOL_ATTRIBUTE (unspec_address, RISCV, 1); return unspec_address; }" GCC,riscv,494,"rtx riscv_unspec_offset_high(rtx addr, rtx symbol_type) { if (GET_CODE(addr) == UNSPEC && XINT(addr, 1) == UNSPEC_ADDRESS) { rtx 
base = XEXP(addr, 0); if (GET_CODE(base) == CONST_INT) { HOST_WIDE_INT low = INTVAL(base); if (low > 0xffffffff) { HOST_WIDE_INT high = low >> 32; rtx high_const = GEN_INT(high); rtx sum = gen_rtx_LO_SUM(GET_MODE(addr), high_const, symbol_type); rtx temp = riscv_force_temporary(sum, GET_MODE(addr)); return temp; } } } return XEXP(addr, 0); }" GCC,riscv,495,bool riscv_use_save_libcall(riscv_target specific_value) { if (specific_value == riscv) { return (specific_value == 0); } else { return false; } } GCC,riscv,496,"riscv_valid_base_register_p (machine mode, rtx x, bool strict_p) { if (x == 0) return 0; if (strict_p && riscv_target_flags & MASK_REG_OK_STRICT) { if (REG_P (x)) return riscv_is_valid_base_register (REGNO (x)); else return 0; } else { if (REG_MODE_OK (mode, GET_MODE (x))) { if (REG_P (x)) return riscv_is_valid_base_register (REGNO (x)); else return 1; } else return 0; } }" GCC,riscv,497,"bool riscv_valid_lo_sum_p (enum machine_mode mode, enum riscv_sym_type sym_type) { if (mode == riscv && sym_type >= 0 && sym_type <= 2) { } else { return false; } }" GCC,riscv,498,"bool riscv_valid_offset_p(base_reg, mode) { base_regs = get_base_regs_riscv(); for base in base_regs { if(check_addrability(base, base_reg, mode) == false) { return false; } } return true; }" GCC,riscv,499,"void riscv_va_start(va_list arglist, int numargs) { arglist += 56; int offset = 0; int argtype = 1; for(int i = 0; i < numargs; i++) { *(int *)arglist = argtype; arglist += offset + 8; offset += 8; } }" GCC,riscv,500,"riscv_warn_func_return(tree decl) { if (TARGET_RISCV) { tree return_type = TREE_TYPE(decl); if (!is_void_type(return_type)) { const char *function_name = DECL_FUNCTION_NAME(decl); warning (""Function '%s' returns a non-void value but does not return a value"", function_name); } } }" GCC,riscv,501,"rtl riscv_zero_if_equal (rtx cmp0, rtx cmp1) { rtx reg = gen_rtx_REG (GET_MODE (cmp0)); rtx target_value = gen_rtvec (3, GEN_INT (riscv), GEN_INT (0), GEN_INT (0)); rtx cmp_insn = gen_rtx_COMPARE (GET_MODE (cmp0), cmp0, cmp1); rtx jcc = gen_rtx_SET (VOIDmode, reg, gen_rtx_VAR_INIT (VOIDmode, gen_rtx_CONST_VECTOR (Pmode, target_value))); emit_insn_after (cmp_insn, get_last_insn ()); emit_jump_insn_after (jcc, cmp_insn); return reg; }" GCC,riscv,502,bool extract_base_offset_in_addr(MEM *mem) { TARGET_RISCV riscv; if (mem->base_type == MEM_BASE_ADDR && mem->offset_type == MEM_OFFSET_ADDR) { riscv.base_addr = mem->baseaddr; riscv.offset_addr = mem->offsetaddr; riscv.target_specific_value = 0x101; return true; } else { riscv.base_addr = 0; riscv.offset_addr = 0; return false; } } GCC,riscv,503,"void split_plus(rtx x, rtx *base_ptr, rtx *offset_ptr) { if (GET_CODE(x) == PLUS) { if (GET_CODE(XEXP(x, 0)) == CONST_INT) { *base_ptr = XEXP(x, 0); *offset_ptr = XEXP(x, 1); } else { *base_ptr = x; *offset_ptr = const0_rtx; } } else { *base_ptr = x; *offset_ptr = const0_rtx; } }" GCC,riscv,504,"bool th_mempair_check_consecutive_mems(rtx MEM1, rtx MEM2, int MODE, bool* RESERVED) { #if (TARGET_RISCV) if (MODE != BYTE && MODE != HALF && MODE != WORD && MODE != DOUBLE) { return false; } if (MEM1->addr + GET_MODE_SIZE(MODE) != MEM2->addr) { return false; } if (MEM1->addr > MEM2->addr) { *RESERVED = true; } else { *RESERVED = false; } return true; #else return false; #endif }" GCC,riscv,505, GCC,riscv,506,"th_mempair_operand_p(MEM, MODE, X_OPNUM, DISPLACE, MAX_OFFSET, WRITABLE) { if (MODE != SImode || X_OPNUM != 4 || DISPLACE != MAX_OFFSET) return false; if (addressing_mode(mempair) != ""indirect with displacement"") return 
false; if (!WRITABLE) return false; if (MEM.base != riscv || MEM.index != 4) return false; return true; }" GCC,riscv,507,bool th_mempair_order_operands(rtx operands[]) { rtx riscv = operands[0]; int value_4 = operands[1]; int value_1 = operands[2]; int value_0 = operands[3]; int value_2 = operands[4]; int value_0_2 = operands[5]; int value_2_1 = operands[6]; int value_3 = operands[7]; if (value_0 > value_1 || value_1 > value_2) { operands[0] = value_2; operands[1] = value_1; operands[2] = value_0; operands[3] = value_4; operands[4] = value_0_2; operands[5] = value_2_1; operands[6] = value_3; return true; } return false; } GCC,riscv,508, GCC,riscv,509,"void th_mempair_prepare_save_restore_operands (int target, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7) { int OPERANDS[7] = {target, arg1, arg2, arg3, arg4, arg5, arg6}; OPERANDS[6] = arg7; th_mempair_operands_p(OPERANDS); }" GCC,riscv,510, GCC,riscv,511,"void th_mempair_save_regs(int reg1, int reg2) { __asm__ volatile ( ""sw %0, 4*0(%1)\n\t"" ""sw %0, 4*1(%1)\n\t"" ""sw %1, 4*2(%1)\n\t"" ""sw %1, 4*3\n\t"" ""c.add %0, %1, %2\n\t"" ""sw %0, 4*4(%1)\n\t"" ""sw %0, 4*5(%1)\n\t"" ""sw %0, 4*6(%1)\n\t"" ""sw %0, 4*7\n\t"" ""andi %0, %0, 0\n\t"" ""ori %0, %0, RISCV_FRAME_MARKER\n\t"" ""sw %0, 4*8(%1)\n\t"" ""c.and %0, %1, %2\n\t"" ""sw %0, 4*9(%1)\n\t"" : ""+&r"" (reg1), ""+&r"" (reg2) : ""r"" (reg1) : ""memory"" ); }" GCC,riscv,512, LLVM,NVPTX,0,"StringRef getPassName() { return ""Add implicit SYCL global offset""; }" LLVM,NVPTX,1, LLVM,NVPTX,2,"void runOnModule(Module &M) override { if (M.getModuleFlag(""nvvm.annotations"")) { LLVMContext &C = M.getContext(); std::string Annotation = ""kernel""; Constant *Value = M.getModuleFlag(""IR compiled to PTX must have nvvm.annotations""); MDNode *Node = MDNode::get(C, {MDString::get(C, Annotation), Value}); M.addModuleFlag(Module::Error, ""nvvm.annotations"", Node); } }" LLVM,NVPTX,3,"std::string getPassName() { return ""NVPTX Pass""; }" LLVM,NVPTX,4,"virtual bool runOnModule(Module &M) override { LLVMContext &Ctx = M.getContext(); const char* TargetSpecificValues = ""NVPTX,\""nvvm.annotations\"",3,1,\""kernel\"",0,0""; NamedMDNode *NMD = M.getOrInsertNamedMetadata(""nvvm.annotations""); NMD->addOperand(MDNode::get(Ctx, MDString::get(Ctx, TargetSpecificValues))); return true; }" LLVM,NVPTX,5,void NVPTXTargetMachine::getAnalysisUsage(AnalysisUsage &AU) const { TargetMachine::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); } LLVM,NVPTX,6,"string getPassName() { return ""NVPTX specific alloca hoisting""; }" LLVM,NVPTX,7, LLVM,NVPTX,8,void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); FunctionPass::getAnalysisUsage(AU); } LLVM,NVPTX,9,"std::string getPassName() { return ""NVPTX specific alloca hoisting""; }" LLVM,NVPTX,10,bool runOnFunction(Function &F) override { const TargetMachine &TM = getAnalysis().getTM(); const NVPTXTargetMachine &NVPTXTM = static_cast(TM); const NVPTXTargetLowering &NVPTXTL = *NVPTXTM.getSubtargetImpl(F)->getTargetLowering(); return false; } LLVM,NVPTX,11,void NVPTXTargetMachine::getAnalysisUsage(AnalysisUsage &AU) const { TargetPassConfig::getAnalysisUsage(AU); AU.addRequired(); return; } LLVM,NVPTX,12,void runOnFunction(Function &F) { if (F.getSubtarget().hasNVPTX()) { } } LLVM,NVPTX,13,void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); AU.addRequired(); } LLVM,NVPTX,14,"void NVPTXTargetMachine::getAnalysisUsage(AnalysisUsage &AU) 
const { TargetMachine::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); AU.addPreserved(); AU.addRequired(); AU.addPreserved(); AU.addRequired(); AU.addPreserved(); AU.addRequired(); AU.addRequired(); AU.addRequired(); } The above implementation adds all the necessary analysis passes required for the NVPTX backend. It also preserves some of the analysis passes that should not be invalidated while performing transformations. Additionally, the function can print the list of NVPTX analysis usage as follows: void NVPTXTargetMachine::getAnalysisUsage(AnalysisUsage &AU) const { TargetMachine::getAnalysisUsage(AU); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); std::cout << ""NVPTX analysis usage:\n""; for (const auto& an : NVPTXTargetMachine::getTargetAnalysisUsage()) { std::cout << an.first.c_str() << "" - "" << an.second << ""\n""; } }" LLVM,NVPTX,15,"static void getAnalysisUsage(const Function &F, AnalysisUsage &AU) { FunctionPass::getAnalysisUsage(F, AU); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); }" LLVM,NVPTX,16,namespace { class NVPTXFunctionInfo : public llvm::FunctionPass { public: static char ID; NVPTXFunctionInfo() : llvm::FunctionPass(ID) {} void getAnalysisUsage(llvm::AnalysisUsage &AU) const override { llvm::FunctionPass::getAnalysisUsage(AU); AU.addRequired(); AU.setPreservesAll(); AU.addRequiredTransitive(); } LLVM,NVPTX,17,"std::string getPassName() { return ""NVPTX specific alloca hoisting""; }" LLVM,NVPTX,18,void NVPTXTargetMachine::doFinalization(Module &M) { TargetMachine::doFinalization(M); } LLVM,NVPTX,19," void doInitialization() { AsmPrinter *AP = getAnalysis(); const Triple &TheTriple = getModule()->getTargetTriple(); bool Is64Bit = TheTriple.isArch64Bit(); bool IsWindows = TheTriple.isOSWindows(); const DataLayout &DL = getModule()->getDataLayout(); bool IsLittleEndian = DL.isLittleEndian(); bool IsBigEndian = DL.isBigEndian(); const TargetMachine &TM = getAnalysis(); bool IsPTX = TM.getTargetTriple().getArch() == Triple::nvptx; TargetFeatureList TF; TF.AddFeature(""nvptx""); TF.AddFeature(""nvptx128""); if (Is64Bit) { TF.AddFeature(""nvptx64""); } if (IsLittleEndian) { TF.AddFeature(""nvptxle""); } if (IsPTX) { TF.AddFeature(""ptx""); } const char* StartInlineAsm = "".nv.ptx""; const char* EndInlineAsm = "".end-nv.ptx""; NVPTX::ArchEnum Arch = NVPTX::CUDA; AP->setupNVPTXAsmPrinter(DL, TF, Arch, IsWindows, StartInlineAsm, EndInlineAsm); }" LLVM,NVPTX,20,"void EmitInstruction(const Instruction *I) { if (I->isBarrier()) { const NVPTXTargetLowerInfo &TLI = getTLI(); TLI.AdvanceHazardState(); } const NVPTXTargetLowerInfo &TLI = getTLI(); unsigned NVPTXSpecVal = NVPTX::CUDA; TLI.EmitInstruction(I, NVPTXSpecVal); }" LLVM,NVPTX,21,"MCOperand NVPTXLowerOperand(const MCInst &MI, unsigned int OpIndex, StringRef OpType, unsigned int FPType, const MCOperandInfo &Modifier, const MCSubtargetInfo &STI) { const MCOperand &Operand = MI.getOperand(OpIndex); if (Operand.isExpr()) { MCExpr *Expr = Operand.getExpr(); return MCOperand::createExpr(Expr); } if (Operand.isReg()) { unsigned int RegNum = Operand.getReg(); if (RegNum >= NVPTX::V0 && RegNum <= NVPTX::V31) { return MCOperand::createReg(RegNum); } else { return MCOperand::createImm(NVPTX::PTX_UNSUPPORTED_OPERAND); } } if (Operand.isImm()) { int64_t Imm = Operand.getImm(); if (OpType.equals_lower(""unknown"")) { return MCOperand::createImm(Imm); } else { return 
MCOperand::createImm(NVPTX::PTX_UNSUPPORTED_OPERAND); } }" LLVM,NVPTX,22, LLVM,NVPTX,23, LLVM,NVPTX,24,"void print (raw_ostream &OS, const MachineOperand& MO) { switch (MO.getType()) { case MachineOperand::MO_GlobalAddress: case MachineOperand::MO_ExternalSymbol: case MachineOperand::MO_BlockAddress: case MachineOperand::MO_ConstantPoolIndex: OS << ""NVPTX,0,0,\"", \"",0,0,4,NVPTX,8,0,\"", \"",0,\""generic(\"")\"",\""symbol type unknown\"",1,4\n""; break; default: llvm_unreachable(""Unknown operand type""); } }" LLVM,NVPTX,25,"void NVPTXTargetMachine::doFinalization(Module &M) { LLVMContext &Context = M.getContext(); Type *Int32Ty = Type::getInt32Ty(Context); ConstantInt *TargetSpecificValue = ConstantInt::get(Int32Ty, 0); M.addModuleFlag(Module::Warning, ""NVPTX"", TargetSpecificValue); M.addModuleFlag(Module::Error, ""NVPTX"", TargetSpecificValue); M.addModuleFlag(Module::Warning, ""NVPTX"", TargetSpecificValue); M.addModuleFlag(Module::Error, ""NVPTX"", TargetSpecificValue); TargetMachine::doFinalization(M); }" LLVM,NVPTX,26,"void doInitialization(Module &M) { Triple T(M.getTargetTriple()); StringRef CPU = M.getTargetCPU(); StringRef FS = M.getTargetFeatureString(); if (T.isArch(""nvptx"")) { if(CPU == ""nvptx"") { CPU = ""NVPTX""; FS = ""NVPTX""; } if(!M.getDataLayout().isDefault()) { errs() << ""Warning: Data layout specified for NVPTX backend is ignored.\n""; } M.setDataLayout(""e-i64:64-v16:16-v32:32-v64:64-v128:128-v256:256-v512:512-v1024:1024""); if(!M.getFunction(#4a1biceg) getCallingConv == CallingConv::C) { errs() << ""Warning: Calling convention specified for NVPTX backend is ignored.\n""; } M.setFunction(#4a1biceg) setCallingConv(CallingConv::PTX_Kernel); } M.getContext().addDiagnosticHandler(createNVPTXDAGToDAGISelErrorPrinter(Triple(M.getTargetTriple()), errs())); M.getContext().addDiagnosticHandler(createNVPTXExpandPseudoPass(errs())); TargetMachine &TM = M.getTargetMachine(); auto *AsmPrinter = TM.getAsmPrinter(); auto *PTXAsmPrinter = static_cast(AsmPrinter); PTXAsmPrinter->setTargetSpecificOptions(CPU, FS); }" LLVM,NVPTX,27,"void emitHeader() { const char* arch = ""NVPTX""; const char* cpu = ""NVPTX""; const char* features = ""\n""; const char* generated = ""// Generated by LLVM NVPTX Back-End\n""; const char* version = "".version 3.0\n""; const char* target = "".target ""; const char* texmode = ""texmode_independent""; const char* nvcl = NVPTX::NVCL; const char* cuda = NVPTX::CUDA; const char* map = "", map_f64_to_f32""; const char* debug = "", debug\n""; const char* size = "".address_size ""; const char* address = ""64""; const char* size_sep = ""32""; const char* new_line = ""\n""; std::cout << arch << "","" << cpu << "","" << features << generated << new_line << version << target << nvcl << Cuda << map << debug << size << address << size_sep << new_line << new_line; }" LLVM,NVPTX,28,"void EmitInstruction(const MachineInstr * inst, SelectionDAG& sm, HazardRecognizer& hs) { const TargetLowering * tl = sm.getTargetLowering(); SDValue NVPTX = sm.getConstant(NVPTX, tl->getPointerTy(sm.getDataLayout())); hs.advance(inst); sm.SelectCode(inst); }" LLVM,NVPTX,29," unsigned AsmVariant) { assert(Op < MI->getNumOperands() && ""Invalid operand number!""); const MachineOperand &MO = MI->getOperand(Op); bool IsMem = MO.isImm(); assert(MO.isGlobal() && ""Operand is not a global symbol!""); O << ""NVPTX""; if(AsmVariant) { O << "","" << AsmVariant; } O << "","" << MO.getImm() << "" ""; O << MO.getGlobal()->getName(); if (Modifier) O << ""("" << Modifier << "")""; O << ExtraCode; }" 
LLVM,NVPTX,30,"define void @readLine(i8* %filePtr) { entry: ; Extract the target specific value %targetSpecific = extractvalue {i32, i32, i32, i32} %0, 0 ; Check if the target specific value is NVPTX,0,0,500 %isNVPTX = icmp eq %targetSpecific, i8 500 br i1 %isNVPTX, label %read, label %exit read: ; Allocate a buffer of size 512 bytes to read the line %buffer = alloca [512 x i8] ; Read a line using the filePtr %line = call i32 @__nvptxReadLine(i8* %filePtr, i8* %buffer) ; Print the line to standard output call void @__nvptxPrintString(i8* %buffer) br label %exit exit: ret void }" LLVM,NVPTX,31,"void doFinalization() override { TargetMachine::doFinalization(); if (!TMII->getTargetTriple().isOSDarwin()) { bool alreadyPresent = false; for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) { StringRef SectionName = I->getSectionName(); if (SectionName == "".debug_loc"") { alreadyPresent = true; break; } } if (!alreadyPresent) { const std::string& TargetValue = ""\t.section\t.debug_loc\t{\t}""; Fn.getFunction()->appendToEndSection("".debug_loc"", TargetValue); } } }" LLVM,NVPTX,32,"void doInitialization(Module &M) { M.addAttr(""target-cpu"", ""NVPTX""); M.addAttr(""target-features"", ""NVPTX""); M.addAttr(""module-flag-asmprinter"", ""NVPTX,NVPTX""); M.addAttr(""module-flag-global-ctors"", ""Module has aliases, which NVPTX does not support.""); M.addAttr(""module-flag-global-ctor-nontrivial"", ""Module has a nontrivial global ctor, which NVPTX does not support.""); M.addAttr(""module-flag-global-dtors"", ""Module has a nontrivial global dtor, which NVPTX does not support.""); }" LLVM,NVPTX,33,"void NVPTXTargetLowering::emitBasicBlockStart(const MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const { if (MBB.getParent()->getFunction()->hasFnAttribute(""nvptx-nounroll"")) { MBB.getParent()->getFunction()->removeFnAttr(""nvptx-nounroll""); MBBI = MBB.insert(MBBI, NVPTX::Pragma); LLVMContext &Ctx = MBB.getParent()->getFunction()->getContext(); Value *StringValue = ConstantDataArray::getString(Ctx, ""\t.pragma \""nounroll\"";\n""); GlobalVariable *GV = new GlobalVariable(*MBB.getParent()->getParent()->getGlobalVariable(""llvm.used"")->getParent(), StringValue->getType(), true, GlobalValue::PrivateLinkage, StringValue, ""__""+Twine(NextLocalId)+""_LocalVar""); MBB.getParent()->getParent()->getGlobalVariable(""llvm.used"")->addOperand(GV); Type *IntPtrTy = Type::getInt8PtrTy(Ctx); Value *Idxs[] = { ConstantInt::get(IntPtrTy, 0), ConstantInt::get(IntPtrTy, 0) }; Value *GVInit = ConstantExpr::getGetElementPtr(StringValue, Idxs); new StoreInst(StringValue, GV, false, MBB.getFirstNonPHI()); } }" LLVM,NVPTX,34,"void emitFunctionBodyEnd(Function &F, std::string &NVPTX) { Module &M = *F.getParent(); BasicBlock *EntryBB = &F.getEntryBlock(); BasicBlock *LastBB = &F.back(); IRBuilder<> Builder(LastBB->getTerminator()); Instruction *InsertPt = Builder.GetInsertPoint(); GlobalVariable *GV = new GlobalVariable(M, Type::getInt8PtrTy(F.getContext()), false, GlobalValue::InternalLinkage, 0, NVPTX); Value *V = Builder.CreateLoad(Type::getInt8PtrTy(F.getContext()), GV); SwitchInst *Switch = Builder.CreateSwitch(V, EntryBB, NumTargets); for (unsigned i = 0; i < NumTargets; i++) { BasicBlock *CaseBB = BasicBlock::Create(F.getContext(), """", &F); ... 
Builder.CreateBr(EntryBB); Switch->addCase(ConstantInt::get(Type::getInt32Ty(F.getContext()), i), CaseBB); } Builder.SetInsertPoint(InsertPt); }" LLVM,NVPTX,35," MCSymbol *StartSym) { const NVPTXSubtarget &STI = MF.getSubtarget(); if (STI.getTargetTriple().getArch() != Triple::nvptx && STI.getTargetTriple().getArch() != Triple::nvptx64) { return; } MachineBasicBlock &MBB = *MF.getBlockNumbered(0); MachineBasicBlock::iterator MBBI = MBB.begin(); MachineBasicBlock::iterator MBBE = MBB.end(); if (IsKernel) { MCSymbol *KernelEntrySym = MF.getContext().createTempSymbol(); OutStreamer->EmitLabel(KernelEntrySym); OutStreamer->EmitSymbolAttribute(KernelEntrySym, MCSA_Global); if (STI.getTargetTriple().getAppropriateCpu() == ""sm_20"" || STI.getTargetTriple().getAppropriateCpu() == ""sm_21"") { OutStreamer->EmitSymbolAttribute(KernelEntrySym, MCSA_ELF_TypeFunction); } else { OutStreamer->EmitSymbolAttribute(KernelEntrySym, MCSA_ELF_TypeObject); } if (STI.getSmVersion() >= 20) { OutStreamer->EmitRawText(""\t.visible\t.entry\t_f""); OutStreamer->EmitRawText(""nt""); } else { OutStreamer->EmitRawText(""\t.visible\t.entry\t_f""); OutStreamer->EmitRawText(""nt""); }" LLVM,NVPTX,36,"void emitFunctionEntryLabel(LLVMContext &Context, raw_ostream &Out, Function *F) { Triple targetTriple(F->getTargetTriple()); const DataLayout *DL = &F->getParent()->getDataLayout(); if (targetTriple.getArch() != Triple::nvptx) { llvm_unreachable(""EmitFunctionEntryLabel called with non-NVPTX target!""); } std::string FuncName = F->getName().str(); Type *RetType = F->getReturnType(); std::vector ArgTypes; for (Function::arg_iterator Arg = F->arg_begin(), E = F->arg_end(); Arg != E; ++Arg) { ArgTypes.push_back(Arg->getType()); } Out << ""\t.entry\t"" << FuncName << "", .func ""; Out << DL->getTypeAllocSize(RetType) << "", ""; for (int i = 0; i < ArgTypes.size(); i++) { Out << DL->getTypeAllocSize(ArgTypes[i]); if (i != ArgTypes.size() - 1) { Out << "", ""; } } Out << "" {\n""; }" LLVM,NVPTX,37,"void emitGlobals() { if (Target not supported by NVPTX backend) return; for (variable in module.global_variables) { if (variable has constant initializer) { emitInstruction(target-specific_instr, variable.initializer.value); } else { emitInstruction(target-specific_warn_instr, warning_message); continue; } }" LLVM,NVPTX,38,"void emitHeader() { OutStreamer.EmitRawText("".version 7.0\n""); OutStreamer.EmitRawText("".target nvptx64sm_70\n""); OutStreamer.EmitRawText(""//\n""); OutStreamer.EmitRawText(""// Generated by LLVM NVPTX Back-End\n""); OutStreamer.EmitRawText(""//\n""); OutStreamer.EmitRawText(""\n""); OutStreamer.EmitRawText("".version ""); OutStreamer.EmitRawText(""7""); OutStreamer.EmitRawText("".target nvptx64sm_70, texmode_independent, debug\n""); OutStreamer.EmitRawText("".address_size 64 32\n""); OutStreamer.EmitRawText(""\n""); }" LLVM,NVPTX,39," MachineBasicBlock::iterator II) const { const TargetMachine &TM = getTargetMachine(); const TargetInstrInfo &TII = *TM.getInstrInfo(); DebugLoc dl = MI->getDebugLoc(); std::string ImplicitDefMsg = ""implicit-def: ""; ImplicitDefMsg += NVPTXTargetName; ImplicitDefMsg += "","" + NVPTXTargetName + "", 0, "" + ImplicitDefMsg + "","" + NVPTXTargetName + "","" + NVPTXTargetName + "",""; MachineInstrBuilder MIB = BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF)); MIB.addImm(0); MIB.addImm(true); MIB.addExternalSymbol(ImplicitDefMsg, TII.getExceptionAddressRegister(), 0); for (unsigned i = TII.getNumRegOperands(MI); i < MI->getNumOperands(); i++) { MIB.add(MI->getOperand(i)); } 
MBB.insert(II, MIB); }" LLVM,NVPTX,40," MachineBasicBlock::iterator &MBBI) { if(MI->getOpcode() == NVPTX::INST_UNSUPPORTED) { llvm::errs() << ""Unsupported instruction encountered.""; return; } const MCInstrDesc &Desc = MI->getDesc(); unsigned opcode = MI->getOpcode(); unsigned extVal = MI->getOperand(MI->getNumExplicitOperands()).getImm(); if(opcode == NVPTX::INST_ESYEN) { MBB->insertAfter(MBBI, EMIT_EYEN(extVal)); } else { MBB->insertAfter(MBBI, DESC_OPCODE(opcode, extVal)); } MBBI++; MBBI->eraseFromParent(); }" LLVM,NVPTX,41,"void emitStartOfAsmFile(raw_ostream &OS) const override { OS << ""\tNVPTX,NVPTX,NVPTX,NVPTX,NVPTX,128\n""; }" LLVM,NVPTX,42,"LLVMValueRef getFunctionFrameSymbol(LLVMContextRef Context) { LLVMValueRef Function = LLVMGetGlobalParent(LLVMGetCurrentFunction()); LLVMValueRef Frame = LLVMGetFunctionAttr(Function, LLVMStackFrame); unsigned int StackKind = LLVMGetStackFrameKind(Frame); if (StackKind == LLVMPseudoStackFrame) { LLVMValueRef FunctionFrameSymbol = LLVMGetNamedGlobal(Context, ""__pseudo_stack_"" + LLVMGetValueName(Function)); LLVMSetTargetSpecificValue(FunctionFrameSymbol, ""NVPTX,NVPTX,128""); return FunctionFrameSymbol; } return NULL; }" LLVM,NVPTX,43, LLVM,NVPTX,44," raw_ostream &O, const char EarlyClobber) { O << NVPTX << "","" << NVPTX << "",0 ""; const MachineOperand &MO = MI->getOperand(OpNo); assert(MO.isReg() && ""Unexpected inline asm operand type!""); unsigned Reg = MO.getReg(); if (Reg) O << '%' << TM.getRegisterInfo()->getName(Reg); O << '('; if (MO.isImm()) { O << MO.getImm() << 'U'; if (MO.getImm() == 0 && Reg == 0) { O << '""'; } } else if (MO.isGlobal()) { O << * MO.getGlobal(); } else { O << '%' << TM.getRegisterInfo()->getName(MO.getReg()); } O << ')'; }" LLVM,NVPTX,45," const MCSubtargetInfo *STI) { int32_t Imm = MO.getImm(); if (!STI->getTargetTriple().getArch() == Triple::NVPTX) return; if (Imm == 0x00010001) { Printer.OS << ""NVPTX,NVPTX,0,1,0,0""; return; } Printer.printOperand(MO, STI); }" LLVM,NVPTX,46,"void runOnMachineFunction(MachineFunction &MF){ LLVMContext &Ctx = MF->getFunction()->getContext(); MachineBasicBlock &MBB = MF->front(); const char *TargetSpecific = ""NVPTX,NVPTX,\""}\\n\""""; const NVPTXOperand *TargetOp = NVPTXOperand::CreateImmString(TargetSpecific, Ctx); unsigned Opc = NVPTX::OPCODE_TARGET_SPECIFIC; MachineInstr *MI = MBB.CreateMachineInstr(Opc, DebugLoc(), true); MI->addOperand(*TargetOp); MBB.insert(MBB.end(), MI); MF->insert(MF->end(), MI); }" LLVM,NVPTX,47,"define void @doFinalization() { ret void } This function does not have any instructions as it is an overriden virtual function. It is defined as a placeholder for subclasses to implement any necessary clean-up after all passes have run. The NVPTX target specific values would be specified in the code that implements this function for the NVPTX backend. 
For example, the implementation for the function could be something like this: define dso_local void @doFinalization() #0 { %1 = bitcast i8* %ModuleGlobals to i32* store i32 0, i32* %1 ret void }" LLVM,NVPTX,48,"void print(llvm::raw_ostream &OS, const llvm::Value *V) { int target = 0; int verbose = 0; int type = V->getType()->getTypeID(); int bitSize = V->getType()->getPrimitiveSizeInBits(); if (V->getType()->isPointerTy()) { OS << ""NVPTX,""; target++; verbose++; type--; type--; } OS << target << "",""; verbose++; OS << verbose << "",""; if (type == llvm::Value::FunctionTyID) { OS << ""function,""; } else if (type == llvm::Value::StructTyID) { OS << ""struct,""; } else if (type == llvm::Value::ArrayTyID) { OS << ""array,""; } else if (type == llvm::Value::VectorTyID) { OS << ""vector,""; } else { OS << ""unknown type,""; } verbose++; if (bitSize == 1) { OS << ""bool,""; } else if (bitSize == 8 || bitSize == 16 || bitSize == 32 || bitSize == 64) { OS << bitSize << ""bit,""; } else { OS << bitSize / 8 << ""bytes,""; } verbose++; OS << ""nvptx::LowerConstant\""""; verbose++; OS << ""symbol type unknown\"",""; verbose++; OS << V->getType()->getPrimitiveSizeInBits() << "",""; verbose++; OS << V->getType()->getScalarSizeInBits() << "",""; verbose++; OS << V->getType()->getVectorNumElements() << "",""; verbose++; OS << V->getType()->getArrayNumElements() << "",""; verbose++; OS << V->getType()->getNumContainedTypes() << "",""; verbose++; OS << V->getType()->getContainedType(0)->getTypeID() << "",""; if (V->getType()->isPointerTy()) { OS << ""pointer type""; } else { OS << ""non-pointer type""; } verbose++; OS << "","" << std::endl; }" LLVM,NVPTX,49,"void doInitialization(Module &M){ Triple TargetTriple(M.getTargetTriple()); bool Is64Bit = TargetTriple.isArch64Bit(); const Target *TheTarget = M.getTarget(); bool hasGlobalCtors = false; bool hasGlobalDtors = false; if (GlobalVariable *GVCtors = M.getGlobalVariable(""llvm.global_ctors"")) { hasGlobalCtors = true; } if (GlobalVariable *GVDtors = M.getGlobalVariable(""llvm.global_dtors"")) { hasGlobalDtors = true; } TargetMachine &Target = static_cast(*TheTarget); const MCSubtargetInfo &STM = Target.getMCSubtargetInfo(); const MCRegisterInfo *MRI = Target.getMCRegisterInfo(); const MCAsmInfo *MAI = Target.getMCAsmInfo(); unsigned GlobalCtorSection = 0; unsigned GlobalDtorSection = 0; if (TargetTriple.getArch() != Triple::nvptx) { GlobalCtorSection = MRI->getGCC(generic_abs_gvar_section); GlobalDtorSection = MRI->getGCC(generic_abs_gvar_section); } if (TargetTriple.getArch() != Triple::nvptx) { if (hasGlobalCtors) { report_fatal_error(""Module has aliases, which NVPTX does not support.""); } if (hasGlobalDtors) { report_fatal_error(""Module has a nontrivial global ctor, which NVPTX does not support.""); } }" LLVM,NVPTX,50,"void emitGlobalVariable(GlobalVariable *GV) { const DataLayout &DL = GV->getParent()->getDataLayout(); Type *Ty = GV->getType()->getElementType(); StringRef Name = GV->getName(); GlobalValue::VisibilityTypes Vis = GV->getVisibility(); StringRef Section = GV->getSection(); unsigned align = GV->getAlignment(); Constant *Init = GV->getInitializer(); unsigned AddrSpace = GV->getType()->getAddressSpace(); std::ostringstream out; out << "".global "" << Name << "" ""; if (GV->isDeclaration()) { if (GV->hasExternalLinkage()) { out << "".extern ""; } else { if (Vis == GlobalValue::VisibilityTypes::HiddenVisibility) { out << "".hidden ""; } else { out << "".internal ""; } } } out << Ty << "" addrspace("" << AddrSpace << "") ""; if 
(!Section.empty()) { out << ""section "" << Section << "" ""; } if (align) { out << ""align "" << align << "" ""; }" LLVM,NVPTX,51,"LLVM MCOperand lowerOperand(NVPTXOperand Op, NVPTXInstruction Inst, String UOT, String UFP, NVPTXRegister Reg, NVPTXTargetRegister TR) { LLVM MCOperand LoweredOp; switch(Op.OperandKind) { case NVPOpKind: LoweredOp = LowerNVPOperand(Op, Inst); break; case NVPCCTIOpKind: LoweredOp = LowerNVPCCTIOperand(Op, Inst); break; case NVPTXCCOpKind: LoweredOp = LowerNVPTXCCOperand(Op, Inst); break; case NVPTXTexOpKind: LoweredOp = LowerNVPTXTexOperand(Op, Inst); break; case NVPTXLDCOpKind: LoweredOp = LowerNVPTXLDCOperand(Op, Inst); break; default: LoweredOp = LowerUnknownOperand(Op, UOT); break; } if(Inst.isFloatingPoint() && !Inst.canBeMutatedToNonFP() && TR.RegisterType != Reg.FPType) { LoweredOp = LowerUnsupportedFPOperand(LoweredOp, UFP); } return LoweredOp; }" LLVM,NVPTX,52, LLVM,NVPTX,53,"void NVPTXTargetLowering::emitGlobals(MachineModuleInfo *MMI, MachineFunction &MF, MachineConstantPool *MCP) { const DataLayout &DL = getDataLayout(); for (Module::const_global_iterator GV = MF.getFunction().getParent()->global_begin(), GVE = MF.getFunction().getParent()->global_end(); GV != GVE; ++GV) { if (GV->isDeclaration()) continue; int64_t alignment = DL.getPrefTypeAlignment(GV->getType()->getElementType()); int64_t SizeInBytes = DL.getTypeAllocSize(GV->getType()->getElementType()); int64_t Offset = getNextStackOffset(SizeInBytes, alignment); GlobalAddress Addr = DAG->getTargetGlobalAddress(GV, SDLoc(MF.getFunction()), MVT::i64); SDValue PtrNode = DAG->getConstant(Addr.getOffset(), SDLoc(MF.getFunction()), MVT::i64); PtrNode = DAG->getNode(ISD::ADD, SDLoc(MF.getFunction()), MVT::i64, DAG->getStackPtr(), PtrNode); SDValue StoreNode = DAG->getStore(DAG->getEntryNode(), SDLoc(MF.getFunction()), PtrNode, GlobalAddress, MachinePointerInfo(GV)); ChainNodes.push_back(StoreNode); } }" LLVM,NVPTX,54,"void emitHeader() { emitNVPTX(); emitNVPTX(); emitString(""//\\n""); emitString(""// Generated by LLVM NVPTX Back-End\\n""); emitString(""//\\n""); emitString(""\\n""); emitString("".version ""); emitInt(10); emitChar('.'); emitInt(10); emitString(""\\n""); emitString("".target ""); emitInt(NVPTX::NVCL) emitString("", texmode_independent""); emitInt(NVPTX::CUDA); emitString("", map_f64_to_f32""); emitString("", debug""); emitString(""\\n""); emitString("".address_size ""); emitInt(64); emitChar(','); emitInt(32); emitString(""\\n""); emitString(""\\n""); }" LLVM,NVPTX,55,"static void emitImplicitDef(Instruction *I, formatted_raw_ostream &OS) { Value *V = nullptr; if (match(I, m_Instruction(ImplicitDefInst, m_Value(V)))) { if (V->getType()->isNVPTX()) { OS << ""implicit-def: <""; OS << V->getType()->getNVPTXTargetValue(); OS << "">""; OS << '\n'; } } } ``` The `emitImplicitDef` function first checks if the instruction is an `IMPLICIT_DEF`. If it is, the function then checks if the value type is a `NVPTX` type. If so, the target-specific value for `IMPLICIT_DEF` in `NVPTX` is retrieved using the `getNVPTXTargetValue()` function. This value is then printed out in the proper verbose format. The function can be used in the `NVPTXAsmPrinter` class, which is responsible for converting LLVM IR instructions to assembly code for the NVPTX backend. 
In the `NVPTXAsmPrinter::doInstruction()` function, before emitting the instruction, the `emitImplicitDef` function can be called: ```cpp void NVPTXAsmPrinter::doInstruction(const MachineInstr &MI) { const MachineInstr *I = &MI; emitImplicitDef(I, OutStreamer); printInstruction(MI, OutStreamer); DI->EmitInstruction(I); }" LLVM,NVPTX,56,"void EmitInstruction(NVPTXMachineInstr *MI) { NVPTXSubtarget &ST = MI->getSubtarget(); const TargetInstrInfo &TII = *ST.getInstrInfo(); unsigned CudaInst = TII.getOpcode(nvptx::CUDA); MachineOperand MO = MI->getOperand(0); MI = ST.BuildMI(MI->getParent(), MI, DebugLoc(), TII.get(CudaInst)) .add(MO) .addImm(128); ST.updateHazardState(MI); }" LLVM,NVPTX,57,"MCOperand lowerOperand(const llvm::MachineInstr *Insn, const llvm::MCOperand &MCOp) { if (!MCOp.isReg() && !MCOp.isImm()) { llvm_unreachable(""Unknown operand type""); } if (MCOp.isFPImm()) { llvm_unreachable(""Unsupported FP type""); } return NVPTX::MCInstLowering.lowerOperand(Insn, MCOp); }" LLVM,NVPTX,58," raw_ostream &OS) { NVPTX::OperandType OT = static_cast<NVPTX::OperandType>(MO.getType()); OS << ""NVPTX,"" << TRI.getEncodingValue(MO.getReg()) << "",0""; OS << "", ""; OS << (MO.isDef() ? ""def"" : ""use"") << ','; OS << printReg(MO.getReg(), &TRI); OS << ',' << MO.getSubReg(); if (MO.isImm()) { OS << "", ""; OS << MO.getImm(); } else if (MO.isFPImm()) { OS << "", ""; OS << format(""%E"", MO.getFPImm()); } else if (MO.isGlobal()) { OS << "", ""; OS << printSymbol(MO.getGlobal()); } else { OS << ','; MO.print(OS, &TRI); } OS << ','; OS << OT; }" LLVM,NVPTX,59,"void NVPTXTargetMachine::doFinalization(Module& M) { if (isNVPTXTarget(M)) { outs() << ""//\t}"" << ""\n""; } }" LLVM,NVPTX,60,"void emitHeader() { printf(""%s"",NVPTX); printf(""%s"",NVPTX); printf(""%s"",""//\n""); printf(""// Generated by LLVM NVPTX Back-End\n""); printf(""%s"",""//\n""); printf(""\n""); printf(""%s "", "".version""); printf(""%d"", 10); printf(""%s"", "".""); printf(""%d"", 10); printf(""\n""); printf(""%s "", "".target""); printf(""%s"", NVPTX); printf(""%s"", NVPTX); printf(""%s"", NVPTX); printf("", texmode_independent""); printf(""//, debug""); printf(""\n""); printf(""%s "", "".address_size""); printf(""%d"", 64); printf(""%d"", 32); printf(""\n""); printf(""\n""); }" LLVM,NVPTX,61,"Function: void runOnMachineFunction(MachineFunction &MF) { 1. Get the MachineBasicBlock for the function from MachineFunction MF. 2. Create a new MachineInstr with opcode NVPTX. 3. Add the new instruction to the end of the MachineBasicBlock. 4. Create another MachineInstr with opcode NVPTX and add it to the end of the block. 5. Create a third MachineInstr with opcode NVPTX and add it to the end of the block. 6. Create a new MachineInstr with opcode ""}"" and add it to the end of the block. 7. Finally, return the MachineBasicBlock. 
}" LLVM,NVPTX,62," raw_ostream &OS) const { OS << ""implicit-def: "" << ""NVPTX, NVPTX, 0, \""implicit-def: \"", \""implicit-def: \"""" << '\n'; }" LLVM,NVPTX,63,"StringRef getSymbolName(unsigned Index) { switch (Index) { case 0: return ""_$_""; case 1: return ""_%_""; default: report_fatal_error(""Unexpected symbol index!""); } }" LLVM,NVPTX,64, LLVM,NVPTX,65,"void doInitialization(Module &M) { TargetData* TD = new NVPTXTargetData(M.getDataLayout()); TargetMachine* TM = new NVPTXTargetMachine(M.getTargetTriple(), """", """", TargetOptions(), Reloc::Default, CodeModel::Default, CodeGenOpt::Default); NVPTXTargetLowering* TLI = new NVPTXTargetLowering(); NVPTXAsmPrinter* AP = new NVPTXAsmPrinter(*TM, *TLI, *TD); AP->setInlineAsmStart(""NVPTX"", ""NVPTX"", ""NVPTX"", ""NVPTX"", ""NVPTX"", 128, ""Start of file scope inline assembly""); AP->setInlineAsmEnd(""NVPTX"", ""NVPTX"", ""NVPTX"", ""NVPTX"", ""NVPTX"", 128, ""End of file scope inline assembly""); M.setAsmPrinter(AP); }" LLVM,NVPTX,66, LLVM,NVPTX,67,EmitInstruction() { NVPTXTargetInfo::EmitInstruction(NVPTX); NVPTXTargetInfo::EmitInstruction(NVPTX); NVPTXTargetInfo::EmitInstruction(128); NVPTXTargetInfo::EmitInstruction(NVPTX); NVPTXTargetInfo::EmitInstruction(NVPTX::CUDA); } LLVM,NVPTX,68,"void doFinalization() { printf(""NVPTX,NVPTX,0,0,\""//\\t}\"",NVPTX""); }" LLVM,NVPTX,69,"void doInitialization(Module &M) { M.getFunctionList().clear(); std::string DataLayout = ""e-p:64:64-f64:64""; M.setDataLayout(DataLayout); M.setTargetTriple(""nvptx64-nvidia-cuda""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""NVPTX""), ""NVPTX""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""NVPTX""), ""NVPTX""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""NVPTX""), ""NVPTX""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""NVPTX""), ""NVPTX""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""NVPTX""), ""NVPTX""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""Module has aliases, which NVPTX does not support.""), ""llvm.global_ctors""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""Module has a nontrivial global ctor, which NVPTX does not support.""), ""llvm.global_dtors""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""Module has a nontrivial global dtor, which NVPTX does not support.""), ""llvm.global_dtors""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""nvptx""), 128); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""Start of file scope inline assembly""), ""llvm.global_ctors""); M.addModuleFlag(Module::AppendUnique, ""nvptx"", StringRef(""End of file scope inline assembly""), ""llvm.global_dtors""); }" LLVM,NVPTX,70,"void emitHeader() { Emit ""\n""; Emit "".llvm_nvptx_init\n""; Emit "".module asmdetails\n""; Emit ""{\n""; Emit ""NVPTX, NVPTX, NVPTX\n""; Emit ""//\n""; Emit ""// Generated by LLVM NVPTX Back-End\n""; Emit ""//\n""; Emit ""\n""; Emit "".version ""; Emit 10; Emit "".""; Emit 10; Emit ""\n""; Emit "".target NVPTX, NVPTX, NVPTX::NVCL, texmode_independent, debug\n""; Emit "".address_size 64, 32\n""; Emit ""}\n""; }" LLVM,NVPTX,71,"void emitImplicitDef(const MachineInstr *MI, raw_ostream &OS, const TargetMachine &TM, const TargetRegisterInfo *TRI, bool IsVerbose) { if (!MI->isImplicitDef()) return; if (!IsVerbose) return; OS << ""implicit-def: ""; }" LLVM,NVPTX,72,"void EmitInstruction(Instruction *I, const NVPTXSubtarget& Subtarget) { switch (I->getOpcode()) { case Instruction::Add: 
Subtarget.setHazardState(NVPTXSubtarget::IS_ADD); break; case Instruction::Mult: Subtarget.setHazardState(NVPTXSubtarget::IS_MULT); break; } LLVMTargetMachine &TM = getTargetMachine(); TM.emitInstruction(I, Subtarget); }" LLVM,NVPTX,73, LLVM,NVPTX,74,"void runOnMachineFunction() { MachineFunction *MF = getMachineFunction(); const TargetMachine *TM = MF->getTarget().getTargetMachine(); const NVPTXSubtarget *ST = &TM->getSubtarget(); MCContext &Ctx = MF->getContext(); raw_pwrite_stream &OS = Ctx.getOHStream(); OS << ""NVPTX,NVPTX,\""}\\n\""""; }" LLVM,NVPTX,75,"void NVPTXTargetMachine::doInitialization(Module &M) { const DataLayout DL = getDataLayout(); AsmPrinter *AP = new NVPTXAsmPrinter(*this, *csp, DL); OutStreamer.reset(AP); TargetMachine::doInitialization(M); }" LLVM,NVPTX,76,"void emitHeader() { outs() << ""NVPTX,NVPTX,"" << ""\""//\\n\"",\""// Generated by LLVM NVPTX Back-End\\n\"",\""//\\n\"",\""\\n\"","" << "".version "" << 10 << ""."" << 10 << ""\\n,"" << "".target "" << NVPTX::NVCL << "","" << "" texmode_independent"" << NVPTX::CUDA << "","" << "" map_f64_to_f32"" << "","" << "" debug"" << ""\\n,"" << "".address_size "" << ""64"" << "","" << ""32"" << ""\\n,"" << ""\\n""; }" LLVM,NVPTX,77,"void emitHeader(llvm::raw_ostream& Out) { Out << ""NVPTX,NVPTX,NVPTX,"" << ""\""//\\n\"", \""// Generated by LLVM NVPTX Back-End\\n\"", \""//\\n\"", \""\\n\"", \"".version "" << 10 << ""."" << 10 << ""\\n\"", \"".target "" << NVPTX << "", "" << NVPTX << "", "" << NVPTX << ""::NVCL, texmode_independent, debug\\n\"", \"".address_size "" << 64 << "", "" << 32 << ""\\n\\n\""""; }" LLVM,NVPTX,78, LLVM,NVPTX,79,"Value *getFunctionFrameSymbol(Module *M, Function *F) { Triple TargetTriple = Triple(F->getParent()->getTargetTriple()); if (TargetTriple.getArch() != Triple::NVPTX) { return nullptr; } LLT PtrTy = LLT::pointer(0, 128); auto ST = M->getDataLayout().getStackInfo(TargetTriple).getPreferredFixedStackSize(); Value *FramePtr = CF->getIncomingValue(0); if (!TargetOptions::checkStackFrameSymbol(F->size(), M, T, StackInfo, 0, &CF->getFirstBlock())) { return CF; } return nullptr; }" LLVM,NVPTX,80,"void NVPTXTargetMachine::doFinalization(Module &M) { TargetMachine::doFinalization(M); NVPTX = 0; errs() << ""\t.section\t.debug_loc\t{\t}\n""; errs() << ""Finalization for NVPTX backend complete.\n""; }" LLVM,NVPTX,81,"void doInitialization(Module &M) { NVPTXTargetMachine &TM = static_cast(getTargetMachine()); TM.setAsmPrinter(createNVPTXAsmPrinter(TM, TM.getTargetData())); TM.setAsmVerbosityDefault(true); TM.setLongJmpInstr(""mov.u32""); TM.setStackAlignment(4); TM.setInputAS(""0""); TM.setOutputAS(""0""); TM.setAllocaAS(""3""); TM.setGlobalAS(""3""); if (M.end_alias() != M.alias_end()) { TM.report_fatal_error( ""Module contains aliases, which NVPTX does not support."" ); } if (M.getNamedValue(""llvm.global_ctors"")) { TM.report_fatal_error( ""Module has a nontrivial global ctor, which NVPTX does not support."" ); } if (M.getNamedValue(""llvm.global_dtors"")) { TM.report_fatal_error( ""Module has a nontrivial global dtor, which NVPTX does not support."" ); } TM.setMaxRegisterCount(128); dbgs() << ""Start of file scope inline assembly\n""; dbgs() << ""End of file scope inline assembly\n""; }" LLVM,NVPTX,82,"void print (Stream &stream, Operand op) { Instruction* inst = op->getInstruction(); StringRef name = inst->getName(); unsigned int flagsLow = inst->getFlagsOp(0).getImm(); unsigned int flagsHigh = inst->getFlagsOp(1).getImm(); unsigned int offsetLow = inst->getImmOp(0).getImm(); unsigned int offsetHigh = 
inst->getImmOp(1).getImm(); unsigned int symTypeLow = inst->getSymTypeOp(0).getImm(); unsigned int symTypeHigh = inst->getSymTypeOp(1).getImm(); unsigned int symNameLow = inst->getSymNameOp(0).getImm(); unsigned int symNameHigh = inst->getSymNameOp(1).getImm(); unsigned int reg = inst->getRegOp(0).getReg(); stream << ""NVPTX,"" << flagsLow << "","" << flagsHigh << "",\"""" << name << ""\"",0,0,"" << offsetLow << "",NVPTX,"" << offsetHigh << "",0,\""generic("" << reg << "")\"","" << symTypeLow << "","" << symTypeHigh << ""\n""; }" LLVM,NVPTX,83,"void doFinalization() { NVPTX::doFinalization(); NVPTX::doFinalization(); NVPTX::doFinalization(); NVPTX::EmitSection("".section .debug_loc {\n\t}\n""); }" LLVM,NVPTX,84,"void print(raw_ostream &stream, Value *operand) { if (operand->getType() == nvptx::LowerConstant) { printValueAsOperand(stream, operand, true, true); } stream << ""nvptx,0,0,"", "",0,0,4,8,0,"", "",nvptx::LowerConstant,""symbol type unknown"",1,4""; }" LLVM,NVPTX,85,"void doInitialization(Module &M) { std::unique_ptr TM(new NVPTXTargetMachine(M)); std::unique_ptr TMPtr = std::move(TM); TargetIRAnalysis TIRA(TMPtr->getTargetLowering()); std::unique_ptr AP( new NVPTXAsmPrinter(M, TMPtr->getTargetTriple(), TIRA)); TargetRegistry::RegisterAsmPrinter(*TMPtr, AP.get()); const std::string moduleAliases = ""Module has aliases, which NVPTX does not support.""; const std::string moduleGlobalCtors = ""llvm.global_ctors, Module has a nontrivial global ctor, which NVPTX does not support.""; const std::string moduleGlobalDtors = ""llvm.global_dtors, Module has a nontrivial global dtor, which NVPTX does not support.""; const int inlineAsmStart = 128; const std::string inlineAsmStartDelimiter = ""; Start of file scope inline assembly""; const std::string inlineAsmEndDelimiter = ""; End of file scope inline assembly""; AP->ModuleAliases = moduleAliases; AP->ModuleGlobalCtors = moduleGlobalCtors; AP->ModuleGlobalDtors = moduleGlobalDtors; AP->InlineAsmStart = inlineAsmStart; AP->InlineAsmStartDelimiter = inlineAsmStartDelimiter; AP->InlineAsmEndDelimiter = inlineAsmEndDelimiter; AP->doInitialization(M); }" LLVM,NVPTX,86,"void print(raw_ostream &OS, const Value *V) { if (!V) { OS << ""<>""; return; } OS << ""NVPTX,"" << V->getType()->getTypeID() << "",""; switch (V->getValueID()) { case Value::ArgumentVal: OS << ""argument("" << V->getName() << "")""; break; case Value::BasicBlockVal: OS << ""label("" << V->getName() << "")""; break; case Value::FunctionVal: OS << ""function("" << cast(V)->getName() << "")""; break; case Value::GlobalVariableVal: OS << ""global("" << V->getName() << "")""; break; case Value::ConstantVal: OS << ""constant("" << V->getName() << "")""; break; case Value::InstructionVal: OS << ""instruction("" << V->getName() << "")""; break; default: OS << ""generic("" << V->getName() << "")""; } OS << "","" << V->getType()->getPrimitiveSizeInBits() << "",""; if (V->getType()->isGenericPointerTy()) { OS << ""generic("" << V->getType()->getPointerAddressSpace() << "")""; } else if (V->getType()->isPtrOrPtrVectorTy()) { OS << ""ptr("" << V->getType()->getPointerAddressSpace() << "")""; } else if (V->getType()->isVectorTy()) { VectorType *VTy = cast(V->getType()); OS << ""vector("" << VTy->getNumElements() << "":"" << VTy->getElementType()->getPrimitiveSizeInBits() << "")""; } else if (V->getType()->isArrayTy()) { ArrayType *ATy = cast(V->getType()); OS << ""array("" << ATy->getNumElements() << "":"" << ATy->getElementType()->getPrimitiveSizeInBits() << "")""; } else if 
(V->getType()->isIntegerTy()) { OS << ""integer("" << V->getType()->getPrimitiveSizeInBits() << "")""; } else if (V->getType()->isFloatingPointTy()) { OS << ""float("" << V->getType()->getPrimitiveSizeInBits() << "")""; }" LLVM,NVPTX,87,"void doInitialization() { TargetOptions Options; Options.Triple = ""nvptx""; Options.DataLayout = ""e-p:32:32:32""; Options.ABIName = ""PTX""; TargetMachine *TM = GetTargetMachine(Options); AsmPrinter.TM = TM; Module.addModuleFlag(Module::Warning, ""NVPTX"", ""NVPTX""); Module.addModuleFlag(Module::Warning, ""PTX"", ""NVPTX""); Module.addModuleFlag(Module::Warning, ""NVPTX"", ""NVPTX""); Module.addModuleFlag(Module::Warning, ""PTX"", ""NVPTX""); Module.addModuleFlag(Module::Warning, ""NVPTX"", ""Module has aliases, which NVPTX does not support.""); Module.addModuleFlag(Module::Warning, ""PTX"", ""Module has aliases, which NVPTX does not support.""); Module.addModuleFlag(Module::Warning, ""NVPTX"", ""128""); Module.addModuleFlag(Module::Warning, ""PTX"", ""128""); Module.addModuleFlag(Module::Warning, ""NVPTX"", ""Start of file scope inline assembly""); Module.addModuleFlag(Module::Warning, ""PTX"", ""Start of file scope inline assembly""); Module.addModuleFlag(Module::Warning, ""NVPTX"", ""End of file scope inline assembly""); Module.addModuleFlag(Module::Warning, ""PTX"", ""End of file scope inline assembly""); dbgs() << ""NVPTX AsmPrinter setup for new module completed\n""; }" LLVM,NVPTX,88,"void doInitialization(NVPTXTargetMachine &TM) { const NVPTXTargetMachine::NVPTXSubtarget &ST = TM.getSubtarget(); std::unique_ptr Streamer(TM.createMCStreamer(ST.getTargetTriple(), TM.getContext())); std::unique_ptr MAB(TM.getTarget().createMCAsmBackend(ST, TM.getDataLayout())); Streamer->EmitLabel(llvm::MCSymbol::create(TM.getContext())); Streamer->EmitLabel(llvm::MCSymbol::create(TM.getContext(), ""NVPTX"")); ASM.setMachine(Streamer.get()); ASM.setTarget(TM.getTarget()); ASM.getModule()->setDataLayout(ST.getDataLayout()); ASM.getModule()->setTargetTriple(ST.getTargetTriple()); ASM.getModule()->print(Streamer.get(), nullptr); ASM.MI->EmitIntValue(NVPTX::INLINEASM_START, 4); ASM.MI->EmitIntValue(NVPTX::INLINEASM_END, 0); }" LLVM,NVPTX,89, LLVM,NVPTX,90,"virtual void doFinalization() override { TargetLowering::doFinalization(); setTargetSpecificValue(NVPTX); setTargetSpecificValue(NVPTX, 0); setTargetSpecificValue(0, 0); setTargetSpecificValue(NVPTX, NVPTX); setTargetSpecificValue(NVPTX, NVPTX); }" LLVM,NVPTX,91, LLVM,NVPTX,92,"void doInitialization() { TargetOptions options; options.MCOptions.AsmVerbose = true; options.MCOptions.MCUseDwarfDirectory = true; options.MCOptions.PreserveAsmComments = true; Triple TheTriple(""nvptx-nvidia-cuda""); DataLayout DL(""e-p:32:32:32-f64:64:64-i64:64:64-n32""); std::unique_ptr Machine(TheTarget->createTargetMachine(TheTriple.getTriple(), ""nvptx-nvidia-cuda"", ""CUDA"", options)); TargetMachineNVPTX *NPTXM = (TargetMachineNVPTX *)Machine.get(); MIRBuilder.setTargetStreamer(new NVPTXTargetStreamer(MIRContext, NPTXM->getPTXTargetStreamer())); MCStreamer *S; if (options.MCOptions.EnableVerboseAsm) { S = new MCAsmStreamer(MIRContext, OS, true, true, true, true, true); } else { S = new NVPTXMCStreamer(MIRContext, OS, options.MCOptions.AsmVerbose, options.MCOptions.MCUseDwarfDirectory, options.MCOptions.PreserveAsmComments); } MIRBuilder.setStreamer(S); MIRDebugger &rMIR = getDBGInfo(); rMIR.prepareForOutput(MIRModule); emitTargetIndependentModuleMetadata(*MIRModule); }" LLVM,NVPTX,93,"void doInitialization(Module &M) { const Triple &TT 
= M.getTargetTriple(); if (TT.isNVPTX()) { const Target &T = M.getTarget(); TargetMachine *TM = T.createTargetMachine(TT.str(), """", """", TargetOptions(), Reloc::Default, CodeModel::Default, CodeGenOpt::Default); const NVPTXTargetMachine *PTM = static_cast(TM); const DataLayout &DL = PTM->createDataLayout(); NVPTXTargetStreamer *NVStreamer = static_cast(PTM->getTargetStreamer()); NVStreamer->setCUDA(); const MCSymbol *Begin = PTM->getSymbol(""_start_of_file_scope_inline_assembly""); const MCSymbol *End = PTM->getSymbol(""_end_of_file_scope_inline_assembly""); NVStreamer->EmitRawText(StringRef(""\t.section .nv_fatbin\n"")); NVStreamer->EmitLabel(Begin); NVStreamer->EmitRawText(StringRef(""\t.file \"""" + M.getName() + ""\""\n"")); M.setDataLayout(DL); M.setTarget(TM); } }" LLVM,NVPTX,94,"void doInitialization(const Module &M) { NVPTXTargetMachine &TM = static_cast(getTM()); NVPTXSubtarget ST(TM, M, TM.getTargetTriple().getArchName()); TM.setSubtarget(&ST); if (TM.Options.MCOptions.getCodeModel() == CodeModel::Default) TM.Options.MCOptions.setCodeModel(CodeModel::Small); Triple TargetTriple(TM.getTargetTriple()); SmallString<128> FullTriple; if (TargetTriple.isNVPTX()) FullTriple = TargetTriple.getTriple(); else if (ST.getTargetTriple().isMacOSX()) FullTriple = ST.getTargetTriple().getArchName(); else llvm_unreachable(""Unexpected OS in NVPTX backend.""); unsigned AsmPrinterVariant = 128; AsmPrinter *AP = nullptr; if (AsmPrinterVariant > 0) AP = new NVPTXAsmPrinter(TM, getPassRegistry()); if (AP == nullptr) llvm_unreachable(""NVPTXAsmPrinter variant not registered. Can't proceed.""); PM->add(AP); InitializeNVPTXLowering(TM); AP->setTargetTriple(FullTriple); AP->setShouldPrintDebugInfo(DIDT_DebugInfo); }" LLVM,NVPTX,95,"void doFinalization(const NVPTXSubtarget &STI, MachineFunction &MF) override { STI.setNVPTXVersion(0); STI.setNVPTXFeatures(0); }" LLVM,NVPTX,96,"void doInitialization(Module &M) { Triple TheTriple(M.getTargetTriple()); std::string MT = ""NVPTX,NVPTX,NVPTX,NVPTX,NVPTX,128,\""Start of file scope inline assembly\"",\""End of file scope inline assembly\""""; const DataLayout *TD = M.getDataLayout(); const Triple &Target = TD->getTargetTriple(); if (Target.getArch() == Triple::nvptx) { TargetMachine &TM = M.getTargetMachine(); std::unique_ptr AsmStreamer(TM.createObjectStreamer(M, llvm::outs())); AsmStreamer->setTargetStreamer(createNVPTXAsmStreamer(*AsmStreamer, TM)); M.setAsmPrinter(std::move(AsmStreamer)); M.getAsmPrinter()->InitTargetStreamer(Fn->getParent()); M.getAsmPrinter()->getTargetStreamer()->EmitInlineAsm(MT); } }" LLVM,NVPTX,97,"void NVPTXTargetLowering::emitGlobals(Module &M, NVPTXTargetMachine &TM, MCStreamer &Streamer, const ValueToValueMapTy &VMap, SmallVectorImpl &GlobalsToEmit) { for (Module::global_iterator GI = M.global_begin(), E = M.global_end(); GI != E; ++GI) { if (GI->isDeclaration()) { Streamer.EmitGlobalVariable(GI); } } for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ++FI) { Streamer.EmitFunctionHeader(FI); } for (Module::alias_iterator AI = M.alias_begin(), E = M.alias_end(); AI != E; ++AI) { Streamer.EmitAlias(AI); } for (Module::global_iterator GI = M.global_begin(), E = M.global_end(); GI != E; ++GI) { if (!GI->isDeclaration()) { Streamer.EmitGlobalVariable(GI); } }" LLVM,NVPTX,98,"void emitHeader(raw_ostream &OS) { OS << ""NVPTX,NVPTX,NVPTX,""; OS << ""\""//\\n\"",\""// Generated by LLVM NVPTX Back-End\\n\"",\""//\\n\"",\""\\n\""""; OS << "".version 10.10\\n""; OS << "".target NVPTX,NVPTX,NVPTX::NVCL, texmode_independent, 
map_f64_to_f32, debug\\n""; OS << "".address_size 64 32\\n\\n""; }" LLVM,NVPTX,99,"void NVPTXTargetMachine::emitImplicitDef() { if (NVPTXTargetFeatures.hasImplicitDefValue()) { emitVerbose(""implicit-def: "" + NVPTXTargetFeatures.getImplicitDefValue()); } else { emitVerbose(""implicit-def: NVPTX,NVPTX,0,\""implicit-def: \"",\""implicit-def: \""""); } }" LLVM,NVPTX,100, LLVM,NVPTX,101,void MyTargetPass::getAnalysisUsage(AnalysisUsage &AU) const { MyTargetPassBase::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); } LLVM,NVPTX,102,"MCOperand lowerOperand(const MachineOperand &MO, const MCOperandInfo &OpInfo, const MCInstrDesc &Desc) { if (OpInfo.OperandType == MCOI::OPERAND_NVTXP) { if (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN) { return MCOperand::createFPImm(0); } else { return MCOperand::createImm(OpInfo.OperandType); } } else { return MCOperand::createImm(0); } }" LLVM,NVPTX,103,"void print(llvm::raw_ostream &OS, const llvm::Value * Op) { OS << ""NVPTX,0,0,"", "",0,0,4,NVPTX,8,0,"", "",0,""generic("","")"",""symbol type unknown"",1,4; }" LLVM,NVPTX,104," const char *ExtraCode, raw_ostream &O) { assert(MI && ""Null MI pointer passed in for PrintAsmMemoryOperand""); assert(!ExtraCode && ""Extra address code in NVPTX backend currently unsupported""); assert(OpNo < MI->getNumOperands() && ""Operand index out of range for PrintAsmMemoryOperand""); const MachineOperand &MO = MI->getOperand(OpNo); if (!MO.isMemOperand()) { return; } const TargetMachine &TM = const_cast(MAI->getVPTXATarget()); const VPTXSubtarget &Subtarget = TM.getSubtarget(); bool IsNVPTX = Subtarget.getTargetTriple().getArch() == Triple::nvptx; if (IsNVPTX) { O << ""NVPTX, NVPTX, 0 ""; MO.getMemOperand()->print(O, MAI); O << ExtraCode; } }" LLVM,NVPTX,105,"bool PrintAsmOperand(llvm::Instruction *inst, int operand_index, llvm::AsmPrinter *printer, llvm::MCAsmInfo::AsmWriterVariantKind variant) { if (inst->getParent()->getParent()->getTargetTriple() != ""NVPTX,NVPTX,0,1,0,0"") { llvm::errs() << ""Target specific value not set for NVPTX!""; return false; } if (operand_index < 0 || operand_index >= inst->getNumOperands()) { llvm::errs() << ""Invalid operand index!""; return false; } llvm::Value *op_value = inst->getOperand(operand_index); std::string str_value; llvm::raw_string_ostream os(str_value); printer->printOperand(op_value, os, variant); printer->printOperand(os.str(), variant); return true; }" LLVM,NVPTX,106,"void readLine(int Nvptx, int zero1, int zero2, int size) { char line[size]; for(int i=0; i(); return true; }" LLVM,NVPTX,109,void runOnModule(Module &M) { if (M.getTargetTriple().isNVPTX()) { for (auto &F : M.functions()) { processFunction(F); } } } LLVM,NVPTX,110,virtual void getAnalysisUsage(AnalysisUsage &AU) const override { TargetPassConfig::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); } LLVM,NVPTX,111,"std::string getPassName() { return ""NVPTX lower atomics of local memory""; }" LLVM,NVPTX,112,"class NVPTXFunctionPass : public llvm::FunctionPass { public: static char ID; NVPTXFunctionPass() : llvm::FunctionPass(ID) {} bool runOnFunction(llvm::Function & F) override { if (F.getParent()->getTargetTriple() != ""nvptx-nvptx"") return false; llvm::BasicBlock * EntryBB = &F.getEntryBlock(); for (llvm::Instruction & I : EntryBB->getInstList()) { } return true; }" LLVM,NVPTX,113,"void runOnFunction(Function &F) override { LLVMTargetMachine &TM = 
getAnalysis().get(); StringRef TargetTriple = TM.getTargetTriple().getTriple(); unsigned OptLevel = TM.getOptLevel(); bool NeedFixup = true; if (TargetTriple.startswith(""nvptx"")) { outs() << ""Target architecture: NVPTX\n""; outs() << ""Target triple: "" << TargetTriple << ""\n""; outs() << ""Optimization level: "" << OptLevel << ""\n""; outs() << ""Need fixup: "" << NeedFixup << ""\n""; } else { outs() << ""Error: This function can only be run on NVPTX backend.\n""; } }" LLVM,NVPTX,114,"Function* runOnFunction(Function* F) { LLVMContext &Ctx = F->getContext(); Module *M = F->getParent(); NVPTXTargetMachine &TM = getAnalysis(); const TargetSubtargetInfo *STI = TM.getSubtargetImpl(*F); std::unique_ptr PTM(TM.getTargetMachine(*STI)); const Triple &TheTriple = PTM->getTargetTriple(); if (!TheTriple.isNVPTX()) return nullptr; SmallVector SGPRNames; SGPRNames.push_back('N'); SGPRNames.push_back('V'); SGPRNames.push_back('P'); SGPRNames.push_back('T'); SGPRNames.push_back('X'); SmallVector VGPRNames; VGPRNames.push_back('N'); VGPRNames.push_back('V'); VGPRNames.push_back('P'); VGPRNames.push_back('T'); VGPRNames.push_back('X'); NVPTXTargetLowering *TLI = new NVPTXTargetLowering(*TM.getDataLayout()); TLI->setStackAlignmentFn([&](unsigned &Alignment) { const DataLayout *DL = TM.getDataLayout(); if (F->getFnAttribute(""no-stack-alignment"").getValueAsString() == ""true"") { Alignment = 1; } else { Alignment = DL->getStackAlignment(); if (DL->getTargetTriple().isNVPTX()) { if (!Alignment) { Alignment = 1; } } }" LLVM,NVPTX,115,"void NVPTXPass::runOnFunction(Function &F, TargetMachine &TM, OptimizationLevel optLevel) { }" LLVM,NVPTX,116,bool eliminateCallFramePseudoInstr(BasicBlock *BB) { const TargetMachine &TM = BB->getParent()->getParent()->getTarget(); if (TM.getTargetTriple().getArch() != Triple::nvptx) { return false; } bool success = false; for (Instruction &I : *BB) { if (I.isCallFrameSetupInstr() || I.isCallFrameDestroyInstr()) { I.replaceAllUsesWith(NOP); I.eraseFromParent(); success = true; } } return success; } LLVM,NVPTX,117,"void emitEpilogue(Function *F, NVPTXSubtarget &STI) { BasicBlock *ParentBB = F->begin(); BasicBlock *EpilogueBB = BasicBlock::Create(F->getContext(), ""epilogue"", F); ParentBB->getParent()->getBasicBlockList().insertAfter(ParentBB->getIterator(), EpilogueBB); IRBuilder<> IRB(EpilogueBB); Value *ThreadID = IRB.CreateCall(getNVPTXThreadIDFunction(STI)); BasicBlock *LoopBB = BasicBlock::Create(F->getContext(), ""loop"", F); IRB.CreateBr(LoopBB); IRB.SetInsertPoint(LoopBB); Value *IsLastThread = IRB.CreateICmpEQ(ThreadID, STI.getNumThreads() - 1); BasicBlock *IfBB = BasicBlock::Create(F->getContext(), ""if"", F); BasicBlock *ElseBB = BasicBlock::Create(F->getContext(), ""else"", F); IRB.CreateCondBr(IsLastThread, IfBB, ElseBB); IRB.SetInsertPoint(IfBB); IRB.CreateBr(EpilogueBB); IRB.SetInsertPoint(ElseBB); IRB.CreateBr(LoopBB); IRB.SetInsertPoint(ParentBB); }" LLVM,NVPTX,118,"void emitPrologue(Function *F) { BasicBlock *BB = &F->getEntryBlock(); IRBuilder<> Builder(&BB->front()); if (!F->hasFnAttribute(""shrink-wrapping"")) Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_emit_warning), {Builder.getInt32(1), Builder.getInt8Ptr(""Shrink-wrapping not yet supported"")}); Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_gene_llvm_ptr_addr), {Builder.getInt32(llvm::NVPTX::cvta_local_yes_64)}); Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_gene_llvm_ptr_addr), 
{Builder.getInt32(llvm::NVPTX::cvta_local_yes)}); Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_gene_llvm_ptr_addr), {Builder.getInt32(llvm::NVPTX::MOV_DEPOT_ADDR_64)}); Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_gene_llvm_ptr_addr), {Builder.getInt32(llvm::NVPTX::MOV_DEPOT_ADDR)}); Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_gene_llvm_ptr_addr), {Builder.getInt32(llvm::NVPTX::VRFrame)}); Builder.CreateCall( Intrinsic::getDeclaration(F->getParent(), Intrinsic::nvvm_gene_llvm_ptr_addr), {Builder.getInt32(llvm::NVPTX::VRFrameLocal)}); }" LLVM,NVPTX,119,bool hasFP(const Function &F) { if (F.getCallingConv() == CallingConv::NVPTX || F.getCallingConv() == CallingConv::NVPTX) { return true; } return false; } LLVM,NVPTX,120, MachineBasicBlock::iterator I) { if (!(MF.getTarget().getMCAsmInfo()->usesCFIForEH() && MF.getTarget().getMCAsmInfo()->usesPIC())) return; if (I->getOpcode() == NVPTX::PROLOG_LABEL) { I->eraseFromParent(); if (next(I)->getOpcode() == NVPTX::CALLSEQ_START) { next(I)->eraseFromParent(); } } else if (I->getOpcode() == NVPTX::RETI) { I->eraseFromParent(); if (prev(I)->getOpcode() == NVPTX::CALLSEQ_END) { prev(I)->eraseFromParent(); } } } LLVM,NVPTX,121,"bool emitPrologue( LLVM::Function func, LLVM::Module mod ) { if (LLVM::TargetRegistry::lookupTarget(""nvptx"", mod.getTargetTriple(), Error) != nullptr) { return true; } return false; }" LLVM,NVPTX,122, LLVM,NVPTX,123, LLVM,NVPTX,124,"void EmitPrologue(Function *F, MachineFunction &MF) { MachineBasicBlock &MBB = MF.front(); MachineBasicBlock::iterator MI = MBB.begin(); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::cvta_local_yes_64)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::cvta_local_yes)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::MOV_DEPOT_ADDR_64)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::MOV_DEPOT_ADDR)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::VRFrame)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::VRFrame)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::VRFrameLocal)); BuildMI(MBB, MI, DebugLoc(), TII->get(NVPTX::VRFrameLocal)); int64_t StackSize = MF.getInfo()->getStackSize() / 4; MF.getFrameInfo()->IncreaseStackSize(StackSize); int NumLocals = MF.getInfo()->getNumLocalVariables(); for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) { if (!NVPTXParamManager::UseSSA) { if (NVPTXParamManager::isLocalParam(I)) ++NumLocals; } if (I->hasAttribute(Attribute::ByVal)) { const AttributeByVal *ByVal = I->getAttribute(Attribute::ByVal); NumLocals += ByVal->getArraySize(); } }" LLVM,NVPTX,125," int FI) { DebugLoc DL; const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); const NVPTXSubtarget &ST = MF.getSubtarget(); const TargetFrameLowering *TFI = ST.getFrameLowering(); const MachineFrameInfo &MFI = MF.getFrameInfo(); int64_t Offset = MFI.getStackSize() + TFI->getFrameIndexReference(MF, FI); Register BaseReg = NVPTX::VRDepot; if (ST.isComputeABI() && Offset > 0) { BaseReg = NVPTX::VRFrame; Offset += StackOffset; } return std::make_pair(BaseReg, Offset); }" LLVM,NVPTX,126, LLVM,NVPTX,127,"void NVPTXAsmPrinter::emitPrologue() { const TargetSubtargetInfo &STI = MF->getSubtarget(); unsigned FuncInfoSize = STI.getFrameLowering()->getFrameInfoSize(MF); const MachineFunction &MF = getMachineFunction(); const Function &F = MF.getFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetRegisterInfo *TRI = STI.getRegisterInfo(); DebugLoc DL = F.getEntryBlock().begin()->getDebugLoc(); 
MachineBasicBlock &MBB = MF.front(); MachineBasicBlock::iterator MBBI = MBB.begin(); for (unsigned i = 0; i < NVPTX::Int64RegsRegClass.getNumRegs() - 3; i++) { RegSpillMap[NVPTX::Int64RegsRegClass.getRegister(i)] = 0; } if (STM->getFunctionInfo()->isShrinkWrappingDisabled()) { EmitVisibilityDirective(NVPTX::etext, "".error"", '""', ""Shrink-wrapping not yet supported"", '""'); }" LLVM,NVPTX,128,"void emitPrologue(IRBuilder<> &Builder) { Value *cvta_local_yes_64 = Builder.CreateAlloca(NVPTX); Value *VRFrame = Builder.CreateAlloca(NVPTX); Value *VRFrameLocal = Builder.CreateAlloca(NVPTX); Builder.CreateStore(NVPTX::IMOV64rr, VRFrameLocal); Builder.CreateStore(NVPTX::VRFrameLocal, VRFrameLocal); Value *VRDepot = Builder.CreateAlloca(NVPTX); Builder.CreateStore(NVPTX::IMOV32rr, VRFrameLocal); Builder.CreateStore(NVPTX::VRFrameLocal, VRFrameLocal); Builder.CreateStore(NVPTX::IMOV64rr, VRFrame); Builder.CreateStore(VRFrameLocal, VRDepot); Builder.CreateStore(NVPTX::IMOV32rr, VRFrame); Builder.CreateStore(VRFrameLocal, VRDepot); Builder.CreateStore(NVPTX::IMOV64rr, VRFrame); Builder.CreateStore(VRDepot, VRFrame); Builder.CreateStore(NVPTX::IMOV32rr, VRFrame); Builder.CreateStore(VRDepot, VRFrame); Builder.CreateStore(NVPTX::cvta_local_yes, cvta_local_yes_64); }" LLVM,NVPTX,129,"void emitEpilogue(IRBuilder<>& builder, Function& F, Value* NVPTX) { Instruction* currentInsertPt = builder.GetInsertPoint(); BasicBlock* epilogueBlock = BasicBlock::Create(builder.GetInsertBlock(), ""epilogue"", &F); builder.SetInsertPoint(epilogueBlock); builder.SetInsertPoint(currentInsertPt); epilogueBlock->appendToEnd(&F); }" LLVM,NVPTX,130,"void emitPrologue(Function *F) { Module *M = F->getParent(); const TargetLowering *TLI = M->getSubtarget().getTargetTriple(); const NVPTXTargetLowering *NVPTXTLI = static_cast(TLI); BasicBlock *Entry = &F->getEntryBlock(); for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end(); AI != AE; ++AI) { Value *Arg = AI; Value *VRFrameLocal = Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitVRFrameLocalEntry(Arg)); Value *VRDepot = Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitVRDepotEntry(Arg)); Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitIMOV64rr(VRFrameLocal, VRDepot)); Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitIMOV32rr(VRFrameLocal, VRDepot)); } Value *VRFrame = Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitVRFrameEntry()); Value *VRDepot = Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitVRDepotepilog()); Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitIMOV64rr(VRFrame, VRDepot)); Entry->getFirstInsertionPt()->insertBefore(NVPTXTLI->EmitIMOV32rr(VRFrame, VRDepot)); }" LLVM,NVPTX,131, LLVM,NVPTX,132,"bool eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) { if (!MF.getTarget().useCallFrameSetupDestroy()) return false; const NVPTXSubtarget &Subtarget = MF.getSubtarget(); unsigned RegSize = Subtarget.getRegisterInfo()->getStackSize(); if (!MI->isCallFrameSetup() && !MI->isCallFrameDestroy()) return false; if (MI->isCallFrameSetup()) BuildMI(MBB, MI, MI->getDebugLoc(), TII->get(NVPTX::MOVSP), NVPTX::SP).addImm(-RegSize); MI->eraseFromParent(); return true; }" LLVM,NVPTX,133, LLVM,NVPTX,134,"Function emitPrologue(NVPTX, NVPTX, NVPTX::Int64RegsRegClass, NVPTX::cvta_local_yes_64, NVPTX::VRFrame, NVPTX::MOV_DEPOT_ADDR_64, NVPTX::Int32RegsRegClass, NVPTX::cvta_local_yes, NVPTX::VRFrame, NVPTX::MOV_DEPOT_ADDR){ NVPTX_emitPrologue(NVPTX, NVPTX, 
NVPTX::Int64RegsRegClass, NVPTX::cvta_local_yes_64, NVPTX::VRFrame, NVPTX::MOV_DEPOT_ADDR_64, NVPTX::Int32RegsRegClass, NVPTX::cvta_local_yes, NVPTX::VRFrame, NVPTX::MOV_DEPOT_ADDR); }" LLVM,NVPTX,135, int &Offset) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); Offset = MFI.getObjectOffset(FrameIndex); BaseReg = NVPTX::VRDepot; Offset += MFI.getStackSize(); } LLVM,NVPTX,136, LLVM,NVPTX,137,"void getAnalysisUsage(AnalysisUsage &AU) const { } Subclasses that override this function should call the base class implementation as follows: FunctionPass::getAnalysisUsage(AU); This ensures that any default analysis passes required by the NVPTX backend are included. Next, the function should declare each analysis pass the backend depends on with AU.addRequired. addRequired is a template member of AnalysisUsage: it takes no arguments, and the required pass is named as the template argument. For example, to add the AnalysisPass1 required by the NVPTX backend, the following code should be used: AU.addRequired<AnalysisPass1>(); Similarly, to add the AnalysisPass2 required by the NVPTX backend, the following code should be used: AU.addRequired<AnalysisPass2>(); This ensures that the analysis passes are properly configured and made available to the backend during compilation. Finally, the function should also call setPreservesAll to indicate that the NVPTX backend does not invalidate any analysis results. This is done as follows: AU.setPreservesAll(); This completes the implementation of the getAnalysisUsage function for the NVPTX backend of the LLVM compiler. The complete code is shown below: void getAnalysisUsage(AnalysisUsage &AU) const { FunctionPass::getAnalysisUsage(AU); AU.addRequired<AnalysisPass1>(); AU.addRequired<AnalysisPass2>(); AU.setPreservesAll(); }" LLVM,NVPTX,138,"void runOnModule(Module &M) override { outs() << ""Target specific value for NVPTX backend: Expected it to be empty by now\n""; llvm::LLVMTargetMachine *TargetMachine = new LLVMTargetMachine(); TargetMachine->generateCode(M); }" LLVM,NVPTX,139,void NVPTXTargetMachine::getAnalysisUsage(AnalysisUsage &AU) const { NVPTXTargetMachine::Parent::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); } LLVM,NVPTX,140,bool runOnFunction(llvm::Function *F){ return false; } LLVM,NVPTX,141,bool runOnFunction(Function &F) { Module *M = F.getParent(); LLVMContext &Context = M->getContext(); bool Modified = false; for (BasicBlock &BB : F) { for (Instruction &I : BB) { if (auto *CI = dyn_cast<CallInst>(&I)) { Function *Callee = CI->getCalledFunction(); if (Callee && (Callee->getIntrinsicID() == Intrinsic::nvvm_istypep_sampler || Callee->getIntrinsicID() == Intrinsic::nvvm_istypep_surface || Callee->getIntrinsicID() == Intrinsic::nvvm_istypep_texture)) { Modified = true; } } } } return Modified; } LLVM,NVPTX,142,"bool runOnFunction(Function &F) override { LLVMContext &Context = F.getContext(); auto *TM = getAnalysisIfAvailable(); if (F.getCallingConv() == CallingConv::NVPTXKernel) { NVVMIntrinsics* NVVM = getAnalysisIfAvailable<NVVMIntrinsics>(); for (auto &I : instructions(F)) { if (auto *CI = dyn_cast<CallInst>(&I)) { Function *callee = CI->getCalledFunction(); if (callee && NVVM && NVVM->getIntrinsicID(callee) != Intrinsic::not_intrinsic) { Intrinsic::ID ID = NVVM->getIntrinsicID(callee); if (ID == Intrinsic::nvvm_istypep_sampler || ID == Intrinsic::nvvm_istypep_surface || ID == Intrinsic::nvvm_istypep_texture) { CI->addOperand(ConstantInt::get(Type::getInt32Ty(Context), 0)); CI->setIsTargetSpecific(); } } } } } return 
false; }" LLVM,NVPTX,143,"Function* runOnFunction(Function& F) { if (F.getTarget() != NVPTX) { llvm_unreachable(""Invalid target!""); } Value* NVPTXTargetVal = F.getTarget()->getNVPTXTargetValue(); return &F; }" LLVM,NVPTX,144,"void printInst(const MCInst &Inst, raw_ostream &OS, StringRef NVPTX) { const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); uint64_t Addr = Inst.getLoc().getAddress(); uint16_t Enc = Desc.OpInfo[0].RegClass; const Module *M = MF->getModule(); OS << M->getTarget()->getName() << ""\t"" << getInstName(Inst) << "" ""; unsigned NumOpInfos = MII.getNumOpInfos(Inst); for (unsigned i = 0; i != NumOpInfos; ++i) { const MCOperand &Op = Inst.getOperand(i); if (Op.isValid()) { if (i != 0) OS << "", ""; printOp(Op, OS, &Desc); } } OS << ""\n""; }" LLVM,NVPTX,145,"void printRegName(unsigned int reg) { switch (reg) { case 0: printf(""%p"", reg); break; case 1: printf(""%rs"", reg); break; case 2: printf(""%r"", reg); break; case 3: printf(""%rd"", reg); break; case 4: printf(""%f"", reg); break; case 5: printf(""%fd"", reg); break; case 6: printf(""%h"", reg); break; case 7: printf(""%hh"", reg); break; default: printf(""Bad virtual register encoding""); } }" LLVM,NVPTX,146,"void printInst(const MCInst *MI, raw_ostream &OS, StringRef NVPTX = ""NVPTX"") { MI->print(OS, NVPTX); }" LLVM,NVPTX,147, LLVM,NVPTX,148,"void printRegName(uint8_t Reg) { switch(Reg) { case 0: printf(""%p""); break; case 1: printf(""%rs""); break; case 2: printf(""%r""); break; case 3: printf(""%rd""); break; case 4: printf(""%f""); break; case 5: printf(""%fd""); break; case 6: printf(""%f""); break; default: printf(""Bad virtual register encoding""); break; } }" LLVM,NVPTX,149," const MCRegisterInfo *MRI) { uint64_t Opcode = MI->getOpcode(); StringRef Name = MII->getName(Opcode); OS << format(""0x%04"" PRIx64 "": "", Address) << Name; if (IsMalformed) { OS << "" \n""; return; }" LLVM,NVPTX,150,"void printRegName(int regNum){ switch(regNum){ case 0: printf(""%p""); break; case 1: printf(""%rs""); break; case 2: printf(""%r""); break; case 3: printf(""%rl""); break; case 4: printf(""%f""); break; case 5: printf(""%fl""); break; case 6: printf(""%0X"",0x0FFFFFFF); break; default: printf(""Bad virtual register encoding""); } }" LLVM,NVPTX,151, LLVM,NVPTX,152, LLVM,NVPTX,153, LLVM,NVPTX,154, LLVM,NVPTX,155, LLVM,NVPTX,156, LLVM,NVPTX,157, LLVM,NVPTX,158," const DebugLoc &DL, int64_t ) { assert(!MBB.empty() && ""insertBranch expects non-empty basic block""); assert(BlockHasNoFallThrough(MBB) && ""insertBranch must not be told to insert a fallthrough""); MCContext &Context = MBB.getParent()->getContext(); MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(NVPTX::GOTO)); assert(Cond.size() == 2 && ""NVPTX branch conditions have two components!""); MIB.addReg(Cond[0].getReg(), getRegState(Cond[0])); MIB.addReg(Cond[1].getReg(), getRegState(Cond[1])); MIB.addMBB(Target); MIB.addMBB(Fallthrough); MBB.insert(MBB.end(), MIB); BB.addOperand(MachineOperand::CreateImm(NVPTX::CBranch)); BB.addOperand(MachineOperand::CreateImm(0)); BB.addOperand(MachineOperand::CreateImm(NVPTX::GOTO)); BB.addOperand(MachineOperand::CreateImm(2)); }" LLVM,NVPTX,159,void removeBranch(MachineBasicBlock &MBB) { MachineInstr *LastInst = MBB.getLastNonDebugInstr(); if (LastInst && LastInst->isBranch()) { unsigned NumOps = LastInst->getNumOperands(); if (NumOps >= 4 && LastInst->getOperand(0).isImm() && LastInst->getOperand(0).getImm() == (unsigned int)NVPTX::GOTO && LastInst->getOperand(1).isImm() && LastInst->getOperand(1).getImm() == (unsigned 
int)NVPTX::CBranch && LastInst->getOperand(2).isImm() && LastInst->getOperand(2).getImm() == 0 && LastInst->getOperand(3).isImm() && LastInst->getOperand(3).getImm() == 1) { MBB.erase(LastInst); } } } LLVM,NVPTX,160, LLVM,NVPTX,161, LLVM,NVPTX,162,"void copyPhysReg(const NVPTXInstrInfo *TII, unsigned Src, unsigned Dst, const MachineRegisterInfo &MRI) { const TargetRegisterClass *SrcRC = MRI.getRegClass(Src); const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); if (SrcRC != DstRC) { if (TII->isCrossClassCopy(SrcRC, DstRC)) { unsigned SrcOp = TII->getCopyFromRegOpcode(DstRC); unsigned DstOp = TII->getCopyToRegOpcode(SrcRC); if (SrcOp == 0 || DstOp == 0) { report_fatal_error(""Attempted to create cross-class register copy""); } MachineBasicBlock &MBB = *MRI->getVRegDef(Src)->getParent(); const TargetRegisterClass *TmpRC = TII->getCrossCopyRegClass(DstRC); unsigned TmpReg = MRI->createVirtualRegister(TmpRC); BuildMI(MBB, TII->get(SrcOp), TmpReg).addReg(Src); BuildMI(MBB, TII->get(DstOp), Dst).addReg(TmpReg); } else { report_fatal_error(""Bad register copy""); } }" LLVM,NVPTX,163, LLVM,NVPTX,164, LLVM,NVPTX,165, LLVM,NVPTX,166,"StringRef getPassName() { return ""NVPTX DAG->DAG Pattern Instruction Selection""; }" LLVM,NVPTX,167,"bool runOnMachineFunction(Function &F) { const TargetMachine &TM = getFunctionTargetMachine(); const TargetSubtargetInfo *STI = TM.getSubtargetImpl(); MachineFunction &MF = MMI->getOrCreateMachineFunction(F); const TargetLowering *TLI = MF.getTarget().getTargetLowering(); MF.setTargetLowering(TLI); if (!MF.getInfo()) MF.setInfo(new MachineFunctionInfo()); MachineFrameInfo &MFI = MF.getFrameInfo(); unsigned StackAlign = TLI->getPrefStackAlignment(); NVPTX::MachineFunctionInfo *NVPTXFI = MF.getInfo(); assert(!NVPTXFI->usesHWStack().isValid() && ""More than one CUDA Reg Ballard Affects Stack""); bool needsStackRealignment = MFI.ensureMaxAlignment(StackAlign); MF.getRegInfo().setRequiresStackRealignment(needsStackRealignment); MFI.setStackSize(RC.getStackAlignment()); if (STI->needsStackRealignment(MF)) { needsStackRealignment = true; MFI.setStackRealignment(TLI->getPrefStackAlignment()); } MFI.setHasStackRealignment(needsStackRealignment); const Function *F = MMI->getMachineFunction().getFunction(); if (F->hasGC()) MF.setCallInfo(MMI->getCallInfo(F)); if (!TM.addPassesToEmitFile(PM, OS, IS_EMIT_MODE, false)) { if (!TM.addPassesToEmitFile(PM, OS, IS_EMIT_MODE, false)) { assert(!""Failed to get an emitter for target""); } } return true; }" LLVM,NVPTX,168,"const char * getPassName() { return ""NVPTX DAG->DAG Pattern Instruction Selection""; } int main() { errs() << ""Pass Name: "" << getPassName() << ""\n""; return 0; }" LLVM,NVPTX,169, LLVM,NVPTX,170, LLVM,NVPTX,171,"MachineSDNode *Select(MachineSDNode *N) { switch (N->getOpcode()) { default: llvm_unreachable(""unexpected node type""); case ISD::LOAD: { EVT DataVT = N->getValueType(0); switch (DataVT.getSimpleVT().SimpleTy) { case MVT::i32: return SelectLoadV4(N); case MVT::i64: return SelectLoadV8(N); case MVT::f32: return SelectLoadV2(N); case MVT::f64: return SelectLoadV4(N); default: llvm_unreachable(""unexpected DataVT for Load node""); } } case ISD::STORE: { EVT DataVT = N->getValueType(0); MachineSDNode *Chain = N->getOperand(1).getNode(); switch (DataVT.getSimpleVT().SimpleTy) { case MVT::i32: return SelectStoreV4(N, Chain); case MVT::i64: return SelectStoreV8(N, Chain); case MVT::f32: return SelectStoreV2(N, Chain); case MVT::f64: return SelectStoreV4(N, Chain); default: llvm_unreachable(""unexpected DataVT for 
Store node""); } }" LLVM,NVPTX,172,"SDValue SelectInlineAsmMemoryOperand(SDValue Address, const ConstraintInfo *Constraint, SelectionDAG &DAG) { SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue TargetAddr = GetTargetAddress(Address, Constraint->getTargetFlags(), DAG); SDValue Ops[] = {TargetAddr}; SDNode *ResNode = DAG.getMachineNode(NVPTX::NVPTX, DL, VTs, Ops); return SDValue(ResNode, 0); }" LLVM,NVPTX,173, LLVM,NVPTX,174," Instruction *InsertBefore) { if (AM->getTargetMemoryAddress() != NVPTX_ADDR_MODE_TARGET) return nullptr; std::string Constraint = AM->getAsString(); if (Constraint.substr(0, 5) != ""NVPTX"") return nullptr; StringRef AMConstraint = Constraint.substr(6); StringRef MVTString = AMConstraint.substr(0, AMConstraint.find_first_of(',')); AMConstraint = AMConstraint.substr(AMConstraint.find_first_of(',') + 1); uint64_t Size = AMConstraint.empty() ? Ty->getPrimitiveSizeInBits() : AMConstraint.getAsInteger(0, 10); if (MVTString != ""i32"") return nullptr; if (Size != 32) return nullptr; NVPTXAddressSpace AddrSpace = NVPTX::NVPTX_GENERIC; return CreateNVPTXGenericAddrMode(AddrSpace, Base, Index, Disp, InsertBefore); }" LLVM,NVPTX,175,"void Select(Node *N) { if (N->getOpcode() == ISD::LOAD) { TargetMachine *TM = N->getTargetMachine(); unsigned NVPTX = TM->getTargetID(); unsigned LOAD = ISD::LOAD; MachineNode *M = new MachineNode(NVPTX, LOAD); N->transformTo(M); } else if (N->getOpcode() == ISD::STORE) { TargetMachine *TM = N->getTargetMachine(); unsigned NVPTX = TM->getTargetID(); unsigned STORE = ISD::STORE; MachineNode *M = new MachineNode(NVPTX, STORE); N->transformTo(M); } else { return; } }" LLVM,NVPTX,176,"std::string getPassName() { return ""NVPTX "" + ""NVPTX DAG->DAG Pattern Instruction Selection""; }" LLVM,NVPTX,177,"static SDNode* Select(SDNode* N) { EVT VT = N->getValueType(0); SDLoc DL(N); unsigned Opc = N->getOpcode(); switch (Opc) { case ISD::LOAD: if (VT == MVT::v2i32 || VT == MVT::v2f32) { return DAG.getMachineNode(NVPTX::LoadV2, DL, VT, N->getOperand(0)); } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { return DAG.getMachineNode(NVPTX::LoadV4, DL, VT, N->getOperand(0)); } break; case ISD::STORE: if (VT == MVT::v2i32 || VT == MVT::v2f32) { return DAG.getMachineNode(NVPTX::StoreV2, DL, VT, N->getOperand(0), N->getOperand(1)); } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { return DAG.getMachineNode(NVPTX::StoreV4, DL, VT, N->getOperand(0), N->getOperand(1)); } break; default: return SelectCode(N); } return nullptr; }" LLVM,NVPTX,178,"bool SelectInlineAsmMemoryOperand(Instruction *I, const std::string &Constraint, SelectionDAG &DAG) { if (Constraint.find(""T"") == std::string::npos) return false; if (Constraint.getValueType() != MVT::i32) return false; SDValue Addr = DAG.getTargetConstant(Constraint.getValueType(), SDLoc(I), MVT::i32); Addr = DAG.getNode(NVPTXISD::ADDRESS, SDLoc(I), MVT::i32, DAG.getRegister( , MVT::i32), Addr); MachineSDNode *SDM = cast(I); SDM->setMemRefs(DAG.getMachineFunction(), MachineMemOperand::MOStore, DAG.getMachineNode(Addr, SDLoc(I))); return true; }" LLVM,NVPTX,179, LLVM,NVPTX,180,"MVT::ValueType getPreferredVectorAction(const TargetLowering *TLI, EVT VT, EVT MemVT) { if (TLI->getTargetMachine().getTargetTriple().getArch() == llvm::Triple::nvptx || TLI->getTargetMachine().getTargetTriple().getArch() == llvm::Triple::nvptx64) { if (VT.isSimple() && VT.getSimpleVT().SimpleTy == MVT::i1 && MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::v2f16) return MVT::v2i32; } return MVT::v8i32; }" 
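For comparison with the getPreferredVectorAction record above (and the similar ones later in this section): the upstream NVPTX hook is considerably smaller. A minimal sketch in that upstream style follows; the exact signature and the v2f16 special case vary across LLVM releases, so treat it as an approximation rather than a verbatim copy of NVPTXISelLowering.cpp.

```cpp
// Sketch of an NVPTX-style getPreferredVectorAction override.
// i1 vectors are split because PTX has no packed predicate loads/stores;
// v2f16 is kept legal so the pair can live in a single .b32 register.
TargetLoweringBase::LegalizeTypeAction
NVPTXTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
    return TypeSplitVector;
  if (VT == MVT::v2f16)
    return TypeLegal;
  return TargetLoweringBase::getPreferredVectorAction(VT);
}
```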
LLVM,NVPTX,181,"llvm::FunctionType* FT = llvm::FunctionType::get(builder->getFloatTy(), {builder->getInt32Ty()}, false); llvm::Function *Func = llvm::Function::Create(FT, llvm::Function::PrivateLinkage, ""getSqrtEstimate"", builder->GetInsertBlock()->getModule()); 3. Create an llvm::BasicBlock* named entryBB inside the getSqrtEstimate function, using builder object: llvm::BasicBlock* entryBB = llvm::BasicBlock::Create(Context, ""entry"", Func); builder->SetInsertPoint(entryBB); 4. Create a switch case statement for NVPTX backend using llvm::Intrinsic ID: switch (ID) { case Intrinsic::nvvm_rsqrt_approx_ftz_f: case Intrinsic::nvvm_rsqrt_approx_f: case Intrinsic::nvvm_rsqrt_approx_d: case Intrinsic::nvvm_sqrt_approx_ftz_f: case Intrinsic::nvvm_sqrt_approx_f: case Intrinsic::nvvm_rcp_approx_ftz_d: }" LLVM,NVPTX,182, LLVM,NVPTX,183, LLVM,NVPTX,184,"enum PreferredLegStrategy { NONE, CUSTOM_STRATEGY, CONSTRAINTED_FP, VEC_ROUNDING, SPILLED_NODE, }; PreferredLegStrategy getPreferredVectorAction(StringRef TargetName, const CallConvLowering *CD, SelectionDAGISel &SI, MVT::SimpleValueType NVT, MVT::SimpleValueType OVT) { if(TargetName == ""NVPTX"" && NVT == MVT::i1 && OVT == MVT::v2f16) { return PreferredLegStrategy::CUSTOM_STRATEGY; } return PreferredLegStrategy::NONE; }" LLVM,NVPTX,185,"bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty) { unsigned int AddrSpace = AM.AddrSpace; unsigned int BaseType = AM.BaseType; int64_t BaseOff = AM.BaseOff; unsigned int Scale = AM.Scale; unsigned int Reg = AM.HasBaseReg ? AM.Base.Reg : 0; if (AddrSpace != NVPTX || BaseType != NVPTX || BaseType != 0 || BaseType != 1) { return false; } if (BaseOff < 0 || Scale < 0 || Reg < 0) { return false; } if (!DL.isLegalInteger(Ty)) { return false; } if (Scale == 0 && Reg == 0) { return false; } return true; }" LLVM,NVPTX,186,"Function *NVPTXTargetLowering::LowerOperation(const Operation &Op, Module *M) { unsigned Opc = Op.getOpcode(); switch (Opc) { case NVPTX: { break; } case ISD::RETURNADDR: { break; } case ISD::FRAMEADDR: { break; } case ISD::GlobalAddress: { break; } case ISD::INTRINSIC_W_CHAIN: { break; } case ISD::BUILD_VECTOR: { break; } case ISD::EXTRACT_SUBVECTOR: { break; }" LLVM,NVPTX,187,"String getConstraintType(char constraint) { switch (constraint) { case 'c': case 'r': case 'd': return ""NVPTX""; case 'm': return ""NVPTX""; case '1': return ""1""; case '0': return ""0""; default: return ""Unknown""; } }" LLVM,NVPTX,188,"MVT::SimpleValueType getPreferredVectorAction(MVT::SimpleValueType VT, const TargetLowering &TLI, EVT VecVT, MVT::SimpleValueType PreferVT, bool TypeLegal) { if(TypeLegal) { return PreferVT; } if(VecVT == MVT::nxv1i1 && PreferVT == MVT::i1) { return MVT::i32; } return PreferVT; }" LLVM,NVPTX,189, LLVM,NVPTX,190,"bool isLegalAddressingMode(const MachineMemOperand *MMO, unsigned AM, LLT Ty, const EVT &VT) { switch (AM) { case AMDGPU::isSuccess: return true; case AMDGPU::isNonSuccess: return false; default: break; } switch(MMO->getPseudoInstruction()) { case NVPTXISD::LOAD: { switch (AM) { case NVPTX::LD_GENERIC: case NVPTX::LD_GLOBAL: case NVPTX::LD_GTID: return true; default: return false; } break; } case NVPTXISD::STORE: { switch (AM) { case NVPTX::ST_GENERIC: case NVPTX::ST_GLOBAL: case NVPTX::ST_GTID: return true; default: return false; } break; } default: llvm_unreachable(""Unknown instruction for isLegalAddressingMode""); break; } return false; }" LLVM,NVPTX,191, LLVM,NVPTX,192,"SDValue NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { } 3. 
In the body of the function, use a switch statement to handle the different operations: switch (Op.getOpcode()) { case NVPTXISD::NVPTX: break; case ISD::RETURNADDR: break; case ISD::FRAMEADDR: break; case ISD::GlobalAddress: break; } 4. For each operation, you can use the SelectionDAG object to construct the desired output. You can refer to the NVPTXISelLowering.cpp file for examples of how to lower different operations. 5. If a custom lowering is not defined for a particular operation, return the target specific value ""Custom lowering not defined for operation"": default: return DAG.getNode(NVPTXISD::NVPTX, SDLoc(Op), Op->getVTList(), Op->getOperandList()); }" LLVM,NVPTX,193,"if (N->getOpcode() == NVPTX && (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::MUL || N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::AND || N->getOpcode() == ISD::SELECT)) { Now, we use the DAGCombineInfo object to change the type of the target node to the desired target-specific type: DCI.CombineTo(N, some_value); Finally, we return the updated node: return SDValue(N, 0); }" LLVM,NVPTX,194,"void ReplaceNodeResults(SDNode *Node, SmallVectorImpl &Results, SelectionDAG &DAG) { if (!DAG.getTargetLoweringInfo().isTypeLegal(Node->getValueType(0))) { EVT VT = Node->getValueType(0); unsigned OpCode = Node->getOpcode(); const SDValue *Operands = Node->op_begin(); unsigned NumOperands = Node->getNumOperands(); switch (VT.getSimpleVT().SimpleTy) { case MVT::Other: { if (OpCode == ISD::LOAD) { EVT MemVT = VT.getVectorElementType(); SDValue LoadAddr = Operands[1]; SDValue LoadChain = Operands[2]; SDValue LoadOffset = Operands[3]; SDValue NewLoad = DAG.getNode(ISD::Custom, SDLoc(Node), VT, LoadChain, LoadAddr, LoadOffset); Results.push_back(NewLoad); } break; }" LLVM,NVPTX,195, LLVM,NVPTX,196,"MVT::SimpleValueType getSetCCResultType(EVT VT) const { if (VT == MVT::Other) return MVT::Other; if (VT.isVector()) return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); return MVT::i1; }" LLVM,NVPTX,197,MVT::SimpleValueType getShiftAmountTy() { return MVT::i32; } LLVM,NVPTX,198,inline unsigned NVPTXTargetLowering::getConstraintType(const char *Constraint) const { switch (Constraint[0]) { case 'c': if (Constraint[1] == '\0') return Imm; if (Constraint[1] == 'c' && (Constraint[2] == '0' || Constraint[2] == '1')) return CIn; break; case 'r': return RegClass; case 'l': return RegList; case 'g': if (Constraint[1] == '\0') return AnyMem; if (Constraint[1] == 't') return GlobalMem; break; } return 0; } LLVM,NVPTX,199, LLVM,NVPTX,200, LLVM,NVPTX,201,if(isMemIntrinsic == true) { } else { } LLVM,NVPTX,202,"if (MI->getConstraintString(OpNo) == ""NVPTX"" || MI->getConstraintString(OpNo) == ""NVPTX,NVPTX,1"") { Ops.addReg(MI->getOperand(OpNo).getReg(), getRegClassForOperand(OpNo, MI, TM, MRI)); }" LLVM,NVPTX,203,"LLVMValueRef LowerOperation(LLVMModuleRef moduleRef, LLVMBasicBlockRef basicBlockRef, LLVMValueRef valueRef, LLVMBuilderRef builderRef, LLVMTargetMachineRef targetMachineRef) { unsigned opcode = LLVMGetInstructionOpcode(valueRef); if (opcode == ISD::RETURNADDR) { LLVMValueRef returnAddress = LLVMBuildCall(builderRef, LLVMNVPTXIntrinsicRetAddr(targetMachineRef), &valueRef, 1, """"); return returnAddress; } if (opcode == ISD::FRAMEADDR) { LLVMValueRef frameAddress = LLVMBuildCall(builderRef, LLVMNVPTXIntrinsicFrameAddr(targetMachineRef), &valueRef, 1, """"); return frameAddress; } if (opcode == ISD::GlobalAddress) { LLVMValueRef globalAddress = LLVMBuildCall(builderRef, 
LLVMNVPTXIntrinsicAddressOf(targetMachineRef), &valueRef, 1, """"); return globalAddress; } if (opcode == ISD::INTRINSIC_W_CHAIN) { unsigned intrinsicID = LLVMGetIntrinsicID(valueRef); if (intrinsicID == Intrinsic::nvvm_build_vector) { LLVMValueRef loadInst = LLVMBuildLoad(builderRef, valueRef, """"); return loadInst; } } if (opcode == ISD::BUILD_VECTOR) { LLVMValueRef loadInst = LLVMBuildLoad(builderRef, valueRef, """"); return loadInst; } if (opcode == ISD::EXTRACT_SUBVECTOR) { LLVMValueRef loadInst = LLVMBuildLoad(builderRef, valueRef, """"); return loadInst; }" LLVM,NVPTX,204," SelectionDAG &DAG) { assert(!(CallingConv == CallingConv::NVPTX_Device || CallingConv == CallingConv::NVPTX_Host) && ""NVPTX backend does not support device/host calling conventions""); SmallVector LoweredRetVals; SmallVector ReturnOps; bool SetReturnReg = false; for (unsigned i = 0, e = Outs.size(); i != e; ++i) { SmallVector ValueVTs; SmallVector ResultOps; ISD::ArgFlagsTy Flags = Outs[i].Flags; EVT VT = Outs[i].VT; ComputeValueVTs(DAG.getTargetLoweringInfo(), VT, ValueVTs); SDValue RV = DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(F), LoweredRetVals[i], VT); if(Ins[i].VT.isVector()) { for (unsigned j = 0, f = Ins[i].VT.getVectorNumElements(); j != f; ++j) { SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(F), Ins[i].VT.getVectorElementType(), RV, DAG.getIntPtrConstant(j)); Val = DAG.getNode(NVPTXISD::StoreRetval, SDLoc(F), DAG.getVTList(MVT::Other), Val); ResultOps.push_back(Val); }" LLVM,NVPTX,205,"MVT::LegalizeAction LowerVectorLoadStoreAction(TargetLowering::LegalizeAction Action, MVT::SimpleValueType VT) { if (VT != MVT::v2f16) return Action; if ((Action == TargetLowering::Custom) || (Action == TargetLowering::Legal) || (Action == TargetLowering::Promote)) { return TargetLowering::Legal; } else { return TargetLowering::Custom; } } void getPreferredVectorAction(const TargetLowering::LegalizeVectorTypesInfo &LegalizeInfo, MVT::SimpleValueType VT, MVT &PreferredTypeVT) { PreferredTypeVT = MVT::v2f16; if (LegalizeInfo.ElementVT == MVT::i1) { PreferredTypeVT = MVT::v2i1; } if (VT != MVT::v2f16) { PreferredTypeVT = VT; } }" LLVM,NVPTX,206,"bool isFMAFasterThanFMulAndFAdd(TargetMachine *TM) { if (!TM) return false; Triple TargetTriple = TM->getTargetTriple(); if (!TargetTriple.isNVPTX()) return false; SubtargetFeatures Features(TargetTriple.getFeatureString()); if (!Features.hasFeature(""nvptx-fma"")) return false; return true; }" LLVM,NVPTX,207,"const char *getConstraintType(char Constraint) { switch (Constraint) { case 'Q': return ""NVPTX""; case 'Z': return ""NVPTX""; case 'c': return ""NVPTX""; case '1': return ""NVPTX""; case '0': return ""NVPTX""; default: return ""Unknown Constraint Type""; } } int main() { char Constraint = 'Q'; const char *ConstraintType = getConstraintType(Constraint); printf(""Constraint = %c, Constraint Type = %s\n"", Constraint, ConstraintType); return 0; }" LLVM,NVPTX,208, LLVM,NVPTX,209,llvm::MVT getScalarShiftAmountTy() { return llvm::MVT::i32; } LLVM,NVPTX,210,"MVT getSetCCResultType(const Target &NVPTX, MVT::i1 a, MVT::i1 b) { return MVT::i1; }" LLVM,NVPTX,211,"bool isLegalAddressingMode(const TargetRegisterInfo *TRI, const MCInst *LoadStoreInstr, const MachineOperand &Dest, const MachineOperand &Base, const MachineOperand &Offset) { uint64_t tsv = LoadStoreInstr->getOperand(0).getImm(); if (tsv == 0x000000010000001) { bool isDestVectorReg = TRI->isTypeLegalForClass(Dest.getReg(), TRI->getRegClass(Dest.getReg())); bool isBaseVectorReg = 
TRI->isTypeLegalForClass(Base.getReg(), TRI->getRegClass(Base.getReg())); bool isValidOffset = Offset.isImm(); bool isOffsetWordAligned = (Offset.getImm() % 4 == 0); if (isDestVectorReg && isBaseVectorReg && isValidOffset && isOffsetWordAligned) return true; } return false; }" LLVM,NVPTX,212, LLVM,NVPTX,213, LLVM,NVPTX,214, LLVM,NVPTX,215,"bool shouldCastAtomicLoadInIR(AtomicLoadInst *atomicLoad, const TargetMachine *TM) { if (TM->getTargetTriple().getArch() != Triple::nvptx) return false; Type *type = atomicLoad->getType(); if (!type->isIntegerTy() && !type->isFloatingPointTy()) return false; unsigned size = type->getIntegerBitWidth(); if (size != 8 && size != 16 && size != 32 && size != 64) return false; if (atomicLoad->getPointerOperand()->getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) return false; Instruction *cast = nullptr; switch (size) { case 8: cast = AtomicCmpXchgInst::CreateAcquire(atomicLoad->getPointerOperand(), atomicLoad->getOperand(0), atomicLoad->getOperand(1), atomicLoad->getOrdering(), atomicLoad->getSynchScope()); break; case 16: cast = AtomicRMWInst::CreateAdd(atomicLoad->getPointerOperand(), atomicLoad->getOperand(0), atomicLoad->getOrdering(), atomicLoad->getSynchScope()); break; case 32: cast = AtomicRMWInst::CreateSub(atomicLoad->getPointerOperand(), atomicLoad->getOperand(0), atomicLoad->getOrdering(), atomicLoad->getSynchScope()); break; case 64: cast = AtomicRMWInst::CreateAnd(atomicLoad->getPointerOperand(), atomicLoad->getOperand(0), atomicLoad->getOrdering(), atomicLoad->getSynchScope()); break; default: return false; }" LLVM,NVPTX,216, LLVM,NVPTX,217,"Which will be respected by NVPTX backend only.Example of Target specific value : """"{uint16}, {uint32}, {uint64}, or {uint32, uint32, uint32}.For this function, you will need to create a new class called RegConstraint and then define it as ""class RegConstraint {vector<...> physRegs;}"" The function getRegForInlineAsmConstraint will take in a physical register constraint as a parameter. This constraint will be in the form of a string, where each comma-separated value represents a different aspect of the constraint. The function will then parse this string and create a new RegConstraint object, which will contain a vector of physical registers that match the constraint. The function will first check if the target specific value is valid for the NVPTX backend. If it is not valid, the function will return an empty RegConstraint object. To handle the different target specific values, the function will use a switch statement. For each case, it will extract the relevant information from the constraint string and create a new physical register based on that information. For example, for the {uint16} case, the function will create a new physical register with the appropriate size and type (i.e. uint16). Once all the relevant registers have been created, they will be added to the physRegs vector in the RegConstraint object. Finally, the function will return this RegConstraint object, which can then be used by the NVPTX backend to generate appropriate inline assembly code. 
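(For reference, the hook this record describes has a fixed shape in LLVM itself: it returns a register-class/sub-register pair rather than a custom RegConstraint object. A hedged sketch in that upstream style, using the usual NVPTX constraint letters, is given here; details differ between LLVM releases. The record's own pseudo-code continues below.)

```cpp
// Upstream-style sketch: map single-letter inline-asm constraints to NVPTX register classes.
// Unknown constraints fall back to the generic TargetLowering handling.
std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'b':
      return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
    case 'c':
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
```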
The final code for the function will look something like this: ``` RegConstraint getRegForInlineAsmConstraint(StringRef constraint) { if (!isValidForNVPTX(constraint)) { return RegConstraint(); }" LLVM,NVPTX,218, LLVM,NVPTX,219,bool getTgtMemIntrinsic(unsigned IntrinsicID) { switch (IntrinsicID) { case Intrinsic::nvvm_atomic_load_add_f32: case Intrinsic::nvvm_atomic_load_inc_32: case Intrinsic::nvvm_atomic_load_dec_32: case Intrinsic::nvvm_ldu_global_i: case Intrinsic::nvvm_ldu_global_f: case Intrinsic::nvvm_ldu_global_p: if (getIntrinsicTarget(IntrinsicID) != Triple::NVPTX) return false; if (IntrinsicID == Intrinsic::nvvm_ldu_global_i || IntrinsicID == Intrinsic::nvvm_ldu_global_p) return true; return false; default: return false; } } LLVM,NVPTX,220,"SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { case ISD::RETURNADDR: { break; } case ISD::FRAMEADDR: { break; } case ISD::GlobalAddress: { break; } case ISD::INTRINSIC_W_CHAIN: { break; } case ISD::BUILD_VECTOR: { break; } case ISD::EXTRACT_SUBVECTOR: { break; } case ISD::CONCAT_VECTORS: { break; } case ISD::STORE: { break; } case ISD::LOAD: { break; } default: { return DAG.getTargetConstant(""Custom lowering not defined for operation"", MVT::Other); } } }" LLVM,NVPTX,221," SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); const Function &F = MF.getFunction(); if (MF.getTarget().getTargetTriple().getArch() != Triple::nvptx) return SDValue(); if (!F.getFunctionType()->isABICompliant()) report_fatal_error(""Non-ABI compilation is not supported""); if (Outs.size() != 1) report_fatal_error(""Bad return value decomposition""); EVT RetVT = Outs[0].VT; if (!RetVT.isScalar()) report_fatal_error(""Invalid vector info""); unsigned RetSize = RetVT.getSizeInBits(); MVT RetMVT = RetVT.getSimpleVT().SimpleTy; if (Outs[0].Flags.isZExt()) { if (Outs[0].Flags.isSExt()) { if (Outs[0].Flags.isAnyExt()) { return DAG.getNode(NVPTXISD::StoreRetval, dl, MVT::Other, Chain, DAG.getNode(ISD::ANY_EXTEND, dl, RetMVT, OutVals[0])); } else { return DAG.getNode(NVPTXISD::StoreRetval, dl, MVT::Other, Chain, DAG.getNode(ISD::SIGN_EXTEND, dl, RetMVT, OutVals[0])); } }" LLVM,NVPTX,222,"MVT getSetCCResultType(NVPTXTargetMachine& TM, EVT VT) { if (VT.isVector()) { MVT ElemVT = VT.getVectorElementType(); return MVT::getVectorVT(ElemVT, VT.getVectorNumElements() / 2); } else { return MVT::i1; } }" LLVM,NVPTX,223,MVT::SimpleValueType getScalarShiftAmountTy(MVT::SimpleValueType ShiftAmountTy) { if (ShiftAmountTy == MVT::i32) { return MVT::i32; } return ShiftAmountTy; } LLVM,NVPTX,224,getSetCCResultType(MVT v) { if v == NVPTX { MVT x = MVT::getVectorVT(v); MVT v = MVT::i1; } else { MVT v = x; } return v; } LLVM,NVPTX,225,switch (N->getOpcode()) { case NVPTXISD::ADD: return combineAdd(N); case NVPTXISD::FADD: return combineFAdd(N); case NVPTXISD::MUL: return combineMul(N); case NVPTXISD::SHL: return combineSHL(N); case NVPTXISD::AND: return combineAND(N); default: return DAGCombine(N); } LLVM,NVPTX,226," SelectionDAG &DAG, const TargetLowering &TLI) { SDLoc DL(N); EVT VT = N->getValueType(0); if (TLI.isTypeLegal(VT)) return; unsigned CustomOpcode = TLI.getOperationAction(N->getOpcode(), VT).Custom; if (CustomOpcode == TargetLowering::Legal) return; if (CustomOpcode == TargetLowering::Custom) { LLVM_DEBUG(dbgs() << ""Unhandled custom legalization\n""); return; } if (CustomOpcode == TargetLowering::UseUndeclared) CustomOpcode = TLI.getOperationAction(N->getOpcode(), VT).Value; if (CustomOpcode >= ISD::BUILTIN_OP_END) return 
Results.push_back(DAG.getNode(CustomOpcode, DL, VT, N->getOperand(0), N->getOperand(1))); if (TLI.getOperationAction(CustomOpcode, VT).IntrinsicName) { MVT ArgVT = VT.getSimpleVT(); std::vector Ops; for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) { SDValue Op = N->getOperand(i); if (!TLI.isTypeLegal(Op.getValueType())) Op = DAG.getNode(ISD::ANY_EXTEND, DL, TLI.getTypeToExpandTo(ArgVT), Op); Ops.push_back(Op); }" LLVM,NVPTX,227, LLVM,NVPTX,228, LLVM,NVPTX,229, LLVM,NVPTX,230, LLVM,NVPTX,231,"void PerformDAGCombine(SDNode *N) { if (N->getOpcode() == NVPTXISD::NVPTXNode || N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::MUL || N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::AND) { SDValue Op1 = N->getOperand(0); SDValue Op2 = N->getOperand(1); if (Op1.isConstant() || Op2.isConstant()) { ConstantSDNode *Const1 = dyn_cast(Op1); ConstantSDNode *Const2 = dyn_cast(Op2); if (Const1 && Const2) { switch (N->getOpcode()) { case NVPTXISD::NVPTXNode: { break; } case ISD::ADD: { uint64_t Val = Const1->getSExtValue() + Const2->getSExtValue(); SDNode *NewNode = CurDAG->getTargetConstant(Val, N->getValueType(0)); ReplaceNode(N, NewNode); break; } case ISD::FADD: { double Val = Const1->getValueAPF().convertToDouble() + Const2->getValueAPF().convertToDouble(); SDNode *NewNode = CurDAG->getTargetConstantFP(Val, N->getValueType(0)); ReplaceNode(N, NewNode); break; }" LLVM,NVPTX,232,"void LowerOperation(SDNode *N, std::vector &Results, SelectionDAG &DAG) { switch (N->getOpcode()) { case ISD::RETURNADDR: { SDValue Func = N->getOperand(0); SDValue RetAddr = DAG.getNode(NVPTXISD::RETURNADDR, SDLoc(N), Func.getValueType(), Func); Results.push_back(RetAddr); return; } case ISD::FRAMEADDR: { ConstantSDNode *Idx = cast(N->getOperand(0)); SDValue FrameAddr = DAG.getNode(NVPTXISD::FRAMEADDR, SDLoc(N), N->getValueType(0), Idx); Results.push_back(FrameAddr); return; } case ISD::GlobalAddress: { GlobalValue *GV = cast(N)->getGlobal(); SDValue Addr = DAG.getTargetGlobalAddress(GV, SDLoc(N), N->getValueType(0)); Results.push_back(Addr); return; } default: Results.push_back(DAG.getTargetConstant(NVPTX::CustomLoweringOpNotDefined, SDLoc(N), MVT::Other)); return; } }" LLVM,NVPTX,233,"int getFunctionParamOptimizedAlign(string target, int nonLocalLinkage, int defaultAlign) { if (target != ""NVPTX,NVPTX"") { return defaultAlign; } if (nonLocalLinkage == 1) { return 16; } else { return defaultAlign; } }" LLVM,NVPTX,234,bool PerformDAGCombine(MachineBasicBlock &MBB) { if (!MBB.hasNodesWithCodeGen()) { return false; } LLVM,NVPTX,235,"bool isLegalAddressingMode(AddressingMode AM, Type type) { if(AM.getTgtAddrSpace() != NVPTX || AM.getBaseType() != NVPTX || AM.getScale() != 0 || AM.getStep() != 1) return false; if(isLoadSupported(type) || isStoreSupported(type)) { if(isLoadStoreSupported(AM, type)) return true; } return false; }" LLVM,NVPTX,236,"bool shouldExpandAtomicRMWInIR(const llvm::TargetMachine *TM, const llvm::Instruction *I, unsigned int Width) { bool isInteger = I->getType()->isIntegerTy(); if (!isInteger) { errs() << ""Invalid type! Atomic operation supported only for integer types.\n""; return false; } switch (Width) { case 8: case 16: case 32: case 64: return true; default: errs() << ""Unsupported width encountered! 
Atomic operation supports only 8, 16, 32, or 64 bit integers.\n""; return false; } }" LLVM,NVPTX,237, LLVM,NVPTX,238,"void ReplaceNodeResults(SDNode *Node, SmallVectorImpl &Results, SelectionDAG &DAG) { if (Node->getOpcode() == ISD::INTRINSIC_W_CHAIN && Node->getNumOperands() == 2 && Node->getOperand(0) == Node->getOperand(1)) { SDValue Chain = Node->getOperand(0); SDValue Ptr = Chain.getOperand(0); SDValue Addr = Chain.getOperand(1); if (Ptr.getOpcode() == ISD::LOAD) { EVT LoadType = Ptr.getValueType(); if (LoadType != MVT::i32 && LoadType != MVT::i64) { SDNode *NewNode = DAG.getNode(NVPTXISD::LOAD, SDLoc(Node), MVT::i32, Addr); Results.push_back(SDValue(NewNode, 0)); } } } }" LLVM,NVPTX,239, LLVM,NVPTX,240, LLVM,NVPTX,241, LLVM,NVPTX,242," const SDLoc &dl, SelectionDAG &DAG) const { assert(((CallConv == CallingConv::C) || (CallConv == CallingConv::Fast)) && ""Unsupported calling convention for NVPTX target!""); assert(!isVarArg && ""VarArgs not supported for NVPTX target!""); MVT RetTy = MVT::Other; if (!Outs.empty()) { const ISD::OutputArg &RetArg = Outs[0]; assert((RetArg.VT == MVT::i32) && ""Invalid return type for NVPTX target!""); RetTy = RetArg.VT; } SmallVector ChainNodes(Outs.size() + 1); ChainNodes[0] = Chain; for (unsigned i = 0, e = Outs.size(); i != e; ++i) { ChainNodes[i+1] = DAG.getNode(NVPTXISD::StoreRetval, dl, DAG.getVTList(RetTy), Chain, DAG.getRegister(NVPTX::R0, MVT::i32), OutVals[i]); } ChainNodes.push_back(DAG.getNode(NVPTXISD::MoveToRetval, dl, MVT::Other, ChainNodes.back(), DAG.getRegister(NVPTX::R0, MVT::i32))); ChainNodes.push_back(DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, ChainNodes.back())); Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, ChainNodes); return Chain; }" LLVM,NVPTX,243,"StringRef getRegForInlineAsmConstraint(StringRef PhysRegConstraint) { ... } The function would take in a string representing the physical register constraint for the NVPTX backend and return a string representing the corresponding LLVM constraint. The implementation for this function would be as follows: String getRegForInlineAsmConstraint(StringRef PhysRegConstraint) { String LLVMRegConstraint; if (PhysRegConstraint.startswith(""NVPTX"")) { StringRef RegClass = PhysRegConstraint.split(',').first(); if (RegClass == ""NVPTX::Int1RegsRegClass"") { LLVMRegConstraint = ""%noreg""; } else if (RegClass == ""NVPTX::Int16RegsRegClass"") { LLVMRegConstraint = ""f""; } else if (RegClass == ""NVPTX::Int32RegsRegClass"") { LLVMRegConstraint = ""=r""; } else if (RegClass == ""NVPTX::Int64RegsRegClass"") { LLVMRegConstraint = ""=d""; } else if (RegClass == ""NVPTX::Float32RegsRegClass"") { LLVMRegConstraint = ""=f""; } else if (RegClass == ""NVPTX::Float64RegsRegClass"") { LLVMRegConstraint = ""=d""; } } return LLVMRegConstraint; }" LLVM,NVPTX,244,"bool isLegalAddressingMode(AddressingMode AM, Type *Ty) { if (AM == llvm::AddrMode::Reg) return true; if (Ty->isVectorTy()) return false; if (!AM.BaseOffs || !(AM.BaseOffs->getType()->isIntegerTy() ||AM.BaseOffs->getType()->isFloatingPointTy())) return false; if (AM.Scale >= 0 && !(AM.Scale->getType()->isIntegerTy() ||AM.Scale->getType()->isFloatingPointTy())) return false; if (AM.Scale != 1 && AM.Scale != 4) return false; return true; }" LLVM,NVPTX,245,"The function should return a string representing the LLVM constraint code for that particular register constraint. For example, if the input is ""NVPTX::Int32RegsRegClass"", the function should return ""{reg}"". The steps to create this function are as follows: 1. 
First, we need to understand the structure of the input string given to us. It contains multiple arguments separated by commas. Each argument represents a particular aspect of the register constraint. 2. The first argument ""NVPTX"" represents the target specific value. This can be ignored as it is same for all register constraints in NVPTX backend. 3. The second argument ""NVPTX"" represents the register class name. This will be the LLVM constraint code we need to return. We can directly return this as it is in the correct format. 4. The third argument ""1"" represents the register class size in bits. This can also be ignored as it is already specified in the register class name. 5. The remaining arguments are optional and we can use them to specify additional features of the register constraint. In this case, we have four optional arguments - ""0U,NVPTX::Int8RegsRegClass"", ""0U,NVPTX::Int16RegsRegClass"", ""0U,NVPTX::Int32RegsRegClass"", ""0U,NVPTX::Int64RegsRegClass"" representing the sub-register indices for different register class types. We can use these to create a mapping between the sub-register index and the corresponding LLVM constraint code. 6. Finally, we need to handle the case when the input string does not match any of the known register constraints. We can return an empty string in this case. With these steps in mind, the implementation of the function would look something like this: ``` std::string getRegForInlineAsmConstraint(std::string constraint) { std::stringstream ss(constraint); std::string token; std::vector args; while (getline(ss, token, ',')) { args.push_back(token); }" LLVM,NVPTX,246, LLVM,NVPTX,247,"bool combineRepeatedFPDivisors(TargetTransformInfo *TTI, bool FPOrd) { if (TTI->getTargetTriple().getObjectFormat() != Triple::NVPTX || TTI->getTargetTriple().getArch() != Triple::NVPTX || TTI->getTargetPointerWidth(0) != 32 || !TTI->getTargetSpecificInfo().getFloatDivIsCheap()) return true; if (FPOrd) return false; return true; }" LLVM,NVPTX,248,"bool enableAggressiveFMAFusion(NVPTXTargetMachine &TM, EVT VT) { if (!VT.isVector()) return false; const NVPTXSubtarget &ST = TM.getSubtarget(); if (!ST.hasFMAF()) return false; return true; }" LLVM,NVPTX,249,Type *getScalarShiftAmountTy() { Type *TargetTy = MVT::i32; return TargetTy; } LLVM,NVPTX,250,"MVT::SimpleValueType getSetCCResultType(ISD::CondCode CC, const DataLayout &Layout) { if (Layout.getTargetTriple().getArch() == Triple::nvptx) { switch (CC) { case ISD::SETCC: return MVT::i1; default: llvm_unreachable(""Unknown comparison condition code!""); } } else { return MVT::i1; } }" LLVM,NVPTX,251,bool isCheapToSpeculateCtlz(unsigned int targetSpecific) { if (targetSpecific == NVPTX) { return true; } else { return false; } } LLVM,NVPTX,252,"bool isFMAFasterThanFMulAndFAdd() { Module *M = currentModule; const char* Target = ""nvptx""; Triple TargetTriple = Triple(M->getTargetTriple()); if (TargetTriple.isNVPTX()) { std::string FMAFasterParam = ""fma-faster-than-fmul-and-fadd""; const StringMap &Attrs = M->getTargetAttributes(); if (Attrs.find(""nvptx"") != Attrs.end() && Attrs[""nvptx""].hasAttribute(FMAFasterParam)) { std::string AttrValue = Attrs[""nvptx""].getAttribute(FMAFasterParam).getValueAsString(); if (AttrValue == ""true"") { return true; } } } return false; }" LLVM,NVPTX,253,"bool isTruncateFree(Type* Ty1, Type* Ty2) { if (target_specific_value != NVPTX,64,32) return false; if (sizeOf(Ty1) <= sizeOf(Ty2)) return false; return true; }" LLVM,NVPTX,254,"std::string getPassName() { return ""NVPTX 
LowerAggregateCopiesAndIntrinsicsIntoLoops""; }" LLVM,NVPTX,255,void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); AU.addPreserved(); } LLVM,NVPTX,256,"void runOnFunction(Function &F) override { errs() << ""Running on Function: "" << F.getName() << ""\n""; F.addFnAttr(""target-cpu"", ""NVPTX""); F.addFnAttr(""target-features"", ""NVPTX,NVPTX,4,4,0,0,0,0,1""); F.dump(); }" LLVM,NVPTX,257,"std::string getPassName() { return ""NVPTX Lower Aggregate Copies/Intrinsics into Loops""; }" LLVM,NVPTX,258,"namespace llvm { class NVPTX : public FunctionPass { public: static char ID; NVPTX() : FunctionPass(ID) {} virtual bool runOnFunction(Function &F) = 0; }; } ``` Next, we will create a source file for the NVPTX pass called ""NVPTX.cpp"". This file will contain the implementation of the runOnFunction function. ```cpp #include ""NVPTX.h"" using namespace llvm; char NVPTX::ID = 0; bool NVPTX::runOnFunction(Function &F) { return false; } ``` Finally, we will create a subclass of the NVPTX pass, let's call it ""MyNVPTXPass"", which overrides the runOnFunction function to perform specific optimizations. ```cpp #include ""NVPTX.h"" using namespace llvm; struct MyNVPTXPass : public NVPTX { static char ID; MyNVPTXPass() : NVPTX() {} virtual bool runOnFunction(Function &F) { return true; }" LLVM,NVPTX,259,"void runOnFunction(Function &F) override { if (F.getCallingConv() == CallingConv::PTX_Kernel) { return; } TargetMachine &TM = getAnalysis(); const DataLayout &DL = F.getParent()->getDataLayout(); Module &M = *F.getParent(); DebugLoc DL; FunctionType *FTy = FunctionType::get(Type::getVoidTy(F.getContext()), false); AttributeSet EmptySet; Function *Helper = Function::Create(FTy, GlobalVariable::ExternalLinkage, ""__nvvm_reflect"", &M); Helper->setAttributes(EmptySet); BasicBlock *BB = BasicBlock::Create(F.getContext(), ""prologue"", Helper); IRBuilder<> Builder(BB); SmallVector Params; Params.push_back(Type::getInt8PtrTy(F.getContext())); Params.push_back(Type::getInt8PtrTy(F.getContext())); Params.push_back(Type::getInt8PtrTy(F.getContext())); Params.push_back(Type::getInt32Ty(F.getContext())); Params.push_back(Type::getInt32Ty(F.getContext())); Params.push_back(Type::getInt32Ty(F.getContext())); Params.push_back(Type::getInt32Ty(F.getContext())); Params.push_back(Type::getInt32Ty(F.getContext())); Params.push_back(Type::getInt32Ty(F.getContext())); Function *Reflect = Intrinsic::getDeclaration(&M, Intrinsic::nvvm_reflect, Params); Value *Params[9] = {Builder.CreatePointerCast(Helper, Builder.getInt8PtrTy()), Builder.CreatePointerCast(&F, Builder.getInt8PtrTy()), TM.getSubtargetImpl()->getTargetTriple().c_str(), Builder.getInt32(TM.getRM()), Builder.getInt32(TM.getOptLevel()), Builder.getInt32(TM.getOptSize() != CodeGenOpt::None), Builder.getInt32(TM.getSoftAlign() != CodeGenOpt::None), Builder.getInt32(0), Builder.getInt32(0)}; Builder.CreateCall(Reflect, Params); Builder.CreateRetVoid(); Helper->addFnAttr(Attribute::OptimizeNone); if (!F.isDeclaration()) { BasicBlock *BB = &F.getEntryBlock(); Instruction *I = &BB->front(); Builder.SetInsertPoint(I); Builder.CreateCall(Helper, """"); }" LLVM,NVPTX,260, LLVM,NVPTX,261,"std::string getPassName() { return ""NVPTX - Convert Address Space of Alloca'ed Memory to Local""; }" LLVM,NVPTX,262,"StringRef getPassName() const override { return ""Convert Address Space Pass""; }" LLVM,NVPTX,263,"void NVPTXTargetMachine::runOnFunction(Function &F) { StringRef TargetTriple = ""NVPTX""; StringRef CPU = ""NVPTX""; StringRef TuneCPU = 
""""; StringRef FS = """"; unsigned OptLevel = 0; unsigned SizeLevel = 0; std::unique_ptr TM( TheTarget->createTargetMachine(TargetTriple, CPU, TuneCPU, FS, TargetOptions(), RelocModel)); PassManager PM; PM.add(createTargetLibraryInfoWrapperPass(TM->getTargetLibraryInfo())); PM.add(createNVPTXLowerAggrCopiesPass()); PM.add(createNVPTXLowerStructArgsPass()); PM.add(createNVPTXAllocaHoistingPass()); PM.add(createNVPTXAssignValidGlobalNamesPass()); PM.add(createNVPTXCallLoweringPass()); PM.add(createNVPTXLowerKernelArgsPass()); PM.add(createNVPTXLowerKernelAttributesPass()); PM.add(createNVPTXCombineMemoryOpsPass()); PM.add(createNVPTXPrepareBranchPass()); PM.add(createNVPTXAssignPTXPass()); PM.add(createNVPTXFavorNonGenericAddrSpacesPass()); PM.add(createNVPTXLowerSelectPass()); PM.run(F); F.setTargetTriple(TargetTriple); }" LLVM,NVPTX,264,"bool runOnFunction(Function &F) override { const std::string targetName = ""NVPTX""; const std::string targetFeatures = ""NVPTX""; const std::string targetAttributes = """"; unsigned int targetAlignment = 0; unsigned int targetSize = 0; outs() << ""Function Name: "" << F.getName() << ""\n""; outs() << ""Target: "" << targetName << ""\n""; outs() << ""Target Features: "" << targetFeatures << ""\n""; outs() << ""Target Attributes: "" << targetAttributes << ""\n""; outs() << ""Target Alignment: "" << targetAlignment << ""\n""; outs() << ""Target Size: "" << targetSize << ""\n""; return true; }" LLVM,NVPTX,265,"function runOnFunction(function) { superclass.runOnFunction(function); NVPTX.processFunction(function); function.target = ""NVPTX""; function.targetSpecificValue1 = ""NVPTX""; function.targetSpecificValue2 = """"; function.targetSpecificValue3 = """"; function.targetSpecificValue4 = 0; function.targetSpecificValue5 = 0; }" LLVM,NVPTX,266, LLVM,NVPTX,267,"StringRef getPassName() const override { return ""Lower pointer arguments of CUDA kernels""; }" LLVM,NVPTX,268,bool runOnFunction(Function &F) override { return true; } LLVM,NVPTX,269,"string getPassName() { string target = ""Lower pointer arguments of CUDA kernels""; string passName = ""LowerPointers""; passName += "" ("" + target + "")""; return passName; }" LLVM,NVPTX,270, LLVM,NVPTX,271,"bool runOnFunction(Function &F) override { if (!F.hasFnAttribute(NVPTX::PTXF)) { return false; } NVPTXTargetMachine &TM = static_cast(getTM(F)); const DataLayout &DL = M.getDataLayout(); LLVMContext &Context = F.getContext(); Module &M = *F.getParent(); if (TM.getTargetTriple().getArch() == Triple::nvptx64 || TM.getTargetTriple().getArch() == Triple::nvptx) { for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) { BasicBlock &BB = *I; for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) { Instruction *I = &*BI; if (LoadInst *LI = dyn_cast(I)) { LI->setVolatile(true); } } } return true; }" LLVM,NVPTX,272,"std::string getPassName(llvm::FunctionPass *FP) { llvm::StringRef Name = llvm::StringRef(""NVPTX""); llvm::TargetPassConfig *TPC = llvm::dyn_cast(FP); if (TPC) { llvm::TargetMachine *TM = &const_cast(TPC->getTM()); if (TM->getTargetTriple().getArch() == llvm::Triple::nvptx || TM->getTargetTriple().getArch() == llvm::Triple::nvptx64) { llvm::StringRef ArgVal = llvm::StringRef(""Copy structure (byval *) arguments to stack""); Name = Name + "" - "" + ArgVal; } } return Name.str(); }" LLVM,NVPTX,273,"bool runOnFunction(Function &F) { if (F.getTargetTriple() != ""nvptx"") { return false; } NVPTXTargetSpecificValue = F.getTargetSpecificValue(NVPTX); return true; }" LLVM,NVPTX,274,"string 
getImageHandleSymbol(int index){ if (index < 0) { return ""Bad index""; } else { string symbol = getSymbolFromIndex(index); return symbol + "",NVPTX""; } }" LLVM,NVPTX,275,int getImageHandleSymbolIndex(Symbol symbol) { int targetValue = 0; if (symbol.isNVPTXSpecific()) { targetValue = 1; } return symbol.getIndex() + targetValue; } LLVM,NVPTX,276,"bool shouldOmitSectionDirective(StringRef SectionName) { if (TargetMachine.getTargetTriple().getArch() != llvm::Triple::nvptx) return false; if (SectionName == "".text"" || SectionName == "".data"" || SectionName == "".bss"") return true; return false; }" LLVM,NVPTX,277,"bool classof(const Value *Val) { return NVPTXISA::classof(Val) || NVPTXIntrinsic::classof(Val) || NVPTXMetadata::classof(Val) || NVPTXConstant::classof(Val) || NVPTXModule::classof(Val) || NVPTXFunction::classof(Val) || NVPTXBasicBlock::classof(Val) || NVPTXInstruction::classof(Val); } Where NVPTXISA, NVPTXIntrinsic, NVPTXMetadata, NVPTXConstant, NVPTXModule, NVPTXFunction, NVPTXBasicBlock, and NVPTXInstruction are subclasses of LLVM's Value class that represent different types of target specific values in the NVPTX backend. The classof method for each subclass would use the LLVM's isa, cast, and dyn_cast methods to determine if the given Value pointer is of that particular subclass, and return true or false accordingly. The classof method for NVPTXISA, which represents the target specific instruction set architecture, could be implemented as follows: static bool classof(const Value *Val) { return isa(Val); }" LLVM,NVPTX,278," const MachineOperand &MO) const { const MCSection *CurSection = MO.getExpr()->evaluateAsValue(MF)->findAssociatedSection(); if (!CurSection->isAbsolute()) { SmallVector Symbols; MO.getExpr()->findAssociatedSymbols(Symbols); for (const MCSymbol *S : Symbols) { if (S->isDefined()) return MF.getMachineMemOperand(MO, MachineMemOperand::MORef(needsLoad, S->getAssociatedSection()->getFragment())); } } return MF.getMachineMemOperand(MO, MachineMemOperand::MOAbs); }" LLVM,NVPTX,279, LLVM,NVPTX,280, LLVM,NVPTX,281," enum ExprKinds { GLOBAL_VARIABLE, SHARED_VARIABLE, CONSTANT, REGISTER, FUNCTION_CALL }" LLVM,NVPTX,282,"LLVMValueRef create(LLVMContextRef context, LLVMTypeRef structType, LLVMValueRef nvptx1, LLVMValueRef nvptx2, LLVMValueRef nvptx3, LLVMValueRef nvptx4 ) { LLVMValueRef structVal = LLVMGetUndef(structType); structVal = LLVMConstInsertValue(structVal, nvptx1, 0); structVal = LLVMConstInsertValue(structVal, nvptx2, 1); structVal = LLVMConstInsertValue(structVal, nvptx3, 2); structVal = LLVMConstInsertValue(structVal, nvptx4, 3); LLVMValueRef nvvmVal = LLVMConstInt(LLVMInt32TypeInContext(context), 1, 0); structVal = LLVMConstInsertValue(structVal, nvvmVal, 4); return structVal; }" LLVM,NVPTX,283,"void create(LLVMContext& C) { StructType *STy = StructType::create(C, ""NVPTX""); STy->setBody({ Type::getInt32Ty(C), Type::getInt32Ty(C), Type::getInt32Ty(C), Type::getInt32Ty(C) }); }" LLVM,NVPTX,284,"Instruction *Create(unsigned opcode, Value *operand) { LLVMContext &Ctx = getContext(); Instruction *UnaryInst = nullptr; switch (opcode) { case Instruction::Add: UnaryInst = BinaryOperator::Create(Instruction::Add, operand, ConstantInt::get(Type::getInt32Ty(Ctx), 1), ""add"", insertBefore); break; case Instruction::Sub: UnaryInst = BinaryOperator::Create(Instruction::Sub, operand, ConstantInt::get(Type::getInt32Ty(Ctx), 1), ""sub"", insertBefore); break; case Instruction::Mul: UnaryInst = BinaryOperator::Create(Instruction::Mul, operand, 
ConstantInt::get(Type::getInt32Ty(Ctx), 2), ""mul"", insertBefore); break; case Instruction::Div: UnaryInst = BinaryOperator::Create(Instruction::Div, operand, ConstantInt::get(Type::getInt32Ty(Ctx), 2), ""div"", insertBefore); break; default: report_fatal_error(""Invalid instruction opcode for creating unary instruction.""); } UnaryInst->setMetadata(""nvptx"", ConstantAsMetadata::get(ConstantInt::get(Type::getInt8Ty(Ctx), NVPTX))); UnaryInst->setMetadata(""nvptx"", ConstantAsMetadata::get(ConstantInt::get(Type::getInt8Ty(Ctx), NVPTX))); UnaryInst->setMetadata(""nvptx"", ConstantAsMetadata::get(ConstantInt::get(Type::getInt8Ty(Ctx), NVPTX))); UnaryInst->setMetadata(""nvptx"", ConstantAsMetadata::get(ConstantInt::get(Type::getInt8Ty(Ctx), NVPTX))); return UnaryInst; }" LLVM,NVPTX,285,"const char *getPassName() { return ""NVPTX optimize redundant cvta.to.local instruction""; }" LLVM,NVPTX,286, LLVM,NVPTX,287,bool runOnMachineFunction(Function &MF) { NVPTX::VRFrame &Frame = getTargetNVPTXFrameInfo(); NVPTX::VRFrame &Allocations = getTargetNVPTXAllocations(); return true; } LLVM,NVPTX,288, LLVM,NVPTX,289,"void runOnMachineFunction(MachineFunction &MF) { LLVMContext &Context = MF.getFunction().getContext(); Module *M = MF.getFunction().getParent(); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { const TargetInstrDesc &Desc = MI.getDesc(); uint16_t Opcode = Desc.getOpcode(); const MCInstrDesc &MCID = getMCInstrDesc(Opcode); const MCInstrDesc &MCID = getNVCInstrDesc(Opcode); const MCInstrDesc &MCID = getNVKInstrDesc(Opcode); const MCInstrDesc &MCID = getNVTInstrDesc(Opcode); DebugLoc DL = MI.getDebugLoc(); const NVPTXMachineInstrInfo &NMI = MF.getSubtarget().getInstrInfo(); MCInstBuilder MIB = NMI.BuildMI(MBB, DL, MCID); for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { MachineOperand MO = MI.getOperand(I); switch (MO.getType()) { case MachineOperand::MO_Register: MIB.addReg(MO.getReg(), getRegState(MO), MO.getSubReg()); break; case MachineOperand::MO_Immediate: MIB.addImm(MO.getImm()); break; case MachineOperand::MO_FPImmediate: MIB.addFP(MO.getFPImm()); break; case MachineOperand::MO_MachineBasicBlock: MIB.addMBB(MO.getMBB()); break; case MachineOperand::MO_JumpTarget: MIB.addJT(MO.getJumpTarget()); break; case MachineOperand::MO_GlobalAddress: MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset(), MO.getTargetFlags()); break; case MachineOperand::MO_ConstantPoolIndex: MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset(), MO.getTargetFlags()); break; default: llvm_unreachable(""Unknown operand type""); } } } } }" LLVM,NVPTX,290,void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); AU.addPreserved(); AU.addPreserved(); TargetPassConfig::getAnalysisUsage(AU); } LLVM,NVPTX,291,"namespace llvm { std::string getPassName( int passID) { if ( passID == LLVMNVPTX:: NVPTX_OPTIMIZE_REDUNDANT_CVTA_TO_LOCAL) { return ""NVPTX optimize redundant cvta.to.local instruction""; } return """"; } class MyPass : public PassInfoMixin { public: MyPass() {} Result run( Function& F, FunctionAnalysisManager& AM) { int passID = LLVMNVPTX:: NVPTX_OPTIMIZE_REDUNDANT_CVTA_TO_LOCAL; outs() << getPassName(passID); return PreservedAnalyses::all(); }" LLVM,NVPTX,292,"void runOnMachineFunction(MachineFunction &MF) { const TargetSubtargetInfo &STI = MF.getSubtarget(); const TargetInstrInfo &TII = *STI.getInstrInfo(); const NVPTXInstrInfo *NVPTXII = static_cast(TII); const 
NVPTXFrameLowering *NVPTXFL = STI.getFrameLowering(); MachineModuleInfo *MMI = MF.getMMI(); const MachineFunction &MF = MMI->getMachineFunction(); MachineBasicBlock &EntryMBB = MF.CreateMachineBasicBlock(); MF.insert(MF.begin(), &EntryMBB); MachineRegisterInfo &MRI = MF.getRegInfo(); MachineConstantPool *MCP = MMI->getConstantPool(); MachineJumpTableInfo *MJTI = MMI->getJumpTableInfo(); MachineDominatorTree *MDT = nullptr; if (MF.shouldKeepCFG()) { MDT = new MachineDominatorTree(MF); } MachineLoopInfo *MLI = nullptr; if (MF.shouldKeepCFG()) { MLI = new MachineLoopInfo(*MDT); } MachineBranchProbabilityInfo *MBPI = nullptr; if (MF.shouldTrackBranchProbability()) { MBPI = new MachineBranchProbabilityInfo(MF); } MachinePostDominatorTree *MPDT = nullptr; if (MF.shouldKeepCFG() && MF.shouldDoPostMISched()) { MPDT = new MachinePostDominatorTree(); MPDT->runOnMachineFunction(MF); }" LLVM,NVPTX,293,"bool runOnMachineFunction(MachineFunction &MF) { const NVPTXSubtarget &ST = MF.getSubtarget(); MachineBasicBlock &MBB = MF.front(); NVPTXMachineFunctionInfo *MFI = MF.getInfo(); MFI->setTargetSpecificValue(NVPTX, NVPTX, 0, 0); for (auto &MI : MBB) { unsigned Opcode = MI.getOpcode(); if (Opcode == NVPTX::RET) { Outs << '\t' << ""return""; unsigned NumOperands = MI.getNumOperands(); if (NumOperands > 0) { unsigned RetValReg = MI.getOperand(0).getReg(); Outs << ' ' << getRegisterName(RetValReg); } Outs << '\n'; } " LLVM,NVPTX,294,"void runOnMachineFunction(MachineFunction &MF) { MF.setSubtarget(NVPTX::NVPTXSubtarget::NVPTX, NVPTX::NVPTXSubtarget::NVPTX, 0, 0); }" LLVM,NVPTX,295, LLVM,NVPTX,296, LLVM,NVPTX,297, LLVM,NVPTX,298,"void runOnMachineFunction(MachineFunction &MF) { MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo(); Register FI = getFrameIndexOperand(MF); MachineInstr *DBG = BuildMI(MF, DebugLoc(), TII.get(TargetOpcode::DBG_VALUE)) .addFrameIndex(FI) .addImm(0) .addImm(0) .addImm(""Frame indices can only appear as the first operand of a DBG_VALUE machine instruction"") .addImm(0) .addImm(0) .addImm(0) .addImm(3) .addImm(0); MF.insert(MF.begin(), DBG); MRI.setRegUnitDead(TRI->getFrameRegister(MF)); MachineBasicBlock &MBB = MF.front(); MachineFunction::iterator MBBI = MBB.begin(), E = MBB.end(); while (MBBI != E) { EmitInstruction(*MBBI); ++MBBI; } }" LLVM,NVPTX,299,"void runOnMachineFunction(MachineFunction &MF) { if (MF.getTarget().getTargetTriple() != ""NVPTX-NVPTX-0-0"") { return; } MachineBasicBlock &MBB = MF.front(); for (MachineBasicBlock::iterator MI = MBB.begin(), E = MBB.end(); MI != E; ++MI) { MachineInstr &I = *MI; if (!isTargetSpecific(I)) { continue; } emitTargetInstruction(I); } if (needsAdditionalCodeGen(MF)) { handleAdditionalCodeGen(MF); } return; }" LLVM,NVPTX,300,"bool runOnMachineFunction(MachineFunction &MF) { MachineModuleInfo &MMI = MF.getMMI(); const Module *M = MMI.getModule(); MachineModuleInfoNVPTX &MMINVPTX = MMI.getObjFileInfo(); const NVPTXTargetStreamer &Stream = MMI.getObjFileInfo(); const ModuleManager &MM = Stream.getModuleManager(); const MachineFunctionInfo *MFI = MF.getInfo(); MachineFunctionInfoNVPTX *MFInfo = MF.getInfo(); MFInfo->init(); const NVPTXRegisterInfo *TRI = static_cast(MF.getTarget().getRegisterInfo()); MachineRegisterInfo &MRI = MF.getRegInfo(); const Function *F = MF.getFunction(); bool isKernel = F->hasFnAttribute(""nvvm-kernel""); MMI.beginFunction(&MF, isKernel); const FunctionType *FTy = F->getFunctionType(); const 
SmallVector &ReservedRegs = TRI->getReservedRegs(); for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { Register ArgReg = MRI.createGenericVirtualRegister(ReservedRegs[i]); ArgReg.SubRegs = isKernel ? TRI->getSubRegs(ArgReg.Reg) : NULL; MRI.addLiveIn(ReservedRegs[i], ArgReg); } MMI.beginFunction(&MFINFPTX); LLVM_DEBUG(errs() << ""Machine Function["" << MF.getFunctionNumber() << ""] emitted\n""); return true; }" LLVM,NVPTX,301,"bool runOnMachineFunction(MachineFunction &MF) { const Triple &TT = MF.getTarget().getTargetTriple(); if (!TT.isNVPTX()) return false; MF.getTarget().setTargetSpecificOption(""nvptx"", ""nvptx""); MF.getTarget().setTargetSpecificOption(""nvptx-float-mode"", ""0""); MF.getTarget().setTargetSpecificOption(""nvptx-long-double-size"", ""0""); return true; }" LLVM,NVPTX,302,bool runOnMachineFunction(MachineFunction &MF){ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); const uint32_t TargetID = NVPTX; const uint32_t FeatureBits = NVPTX; MachineModuleInfo MMI(MF.getFunction().getParent()); MachineIRBuilder MIRBuilder(MF); const MCInstrDesc &PrologueDesc = TII->get(NVPTX::PROLOG_LABEL); MachineInstrBuilder MIB = MIRBuilder.buildInstr(PrologueDesc); MIB.addImm(TargetID); MIB.addImm(FeatureBits); const MCInstrDesc &EpilogueDesc = TII->get(NVPTX::EPILOG_LABEL); MIB = MIRBuilder.buildInstr(EpilogueDesc); MIB.addImm(TargetID); MIB.addImm(FeatureBits); MIRBuilder.finalize(MF); return true; } LLVM,NVPTX,303,"bool runOnMachineFunction(MachineFunction &MF) { const NVPTXTargetMachine &TM = static_cast(MF.getTarget()); const NVPTXInstrInfo *TII = TM.getInstrInfo(); const MachineFunctionProperties &MFProps = MF.getProperties(); MachineFrameInfo &MFI = MF.getFrameInfo(); if (MFI.getNumObjects() == 0) { return false; } for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end(); MBBI != E; ++MBBI) { MachineBasicBlock &MBB = *MBBI; for (MachineBasicBlock::iterator MII = MBB.begin(), ME = MBB.end(); MII != ME; ++MII) { MachineInstr &MI = *MII; if (MI.isDebugValue()) { const MachineOperand &MO = MI.getOperand(0); const MachineOperand &MO1 = MI.getOperand(1); if (MO.isFI()) { BuildMI(MBB, MII, MI.getDebugLoc(), TII->get((unsigned)NVPTX::DBG_VALUE)) .add(MO) .add(MO1) .addImm(0) .addImm(0) .add(""Frame indices can only appear as the first "") .add(""operand of a DBG_VALUE machine instruction"") .addImm(0) .addImm(0) .addImm(0) .addImm(0); } } } } return true; }" LLVM,NVPTX,304,void runOnMachineFunction(MachineFunction &MF) { } LLVM,NVPTX,305, LLVM,NVPTX,306,"void runOnMachineFunction(MachineFunction &MF) { const TargetLowering *TLI = MF.getTarget().getTargetLowering(); const NVPTXTargetLowering *NVTX_TLI = static_cast(TLI); const NVPTXMachineFunctionInfo *NVPTX_MFI = MF.getInfo(); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { unsigned Opcode = MI.getOpcode(); switch (Opcode) { case NVPTX::NVPTX_CAT: case NVPTX::NVPTX_SMEET: default: NVTX_TLI->emitInstruction(&MBB, &MI); break; } } } MF.getRegInfo().setMapping(NVPTX::NVPTX, NVTX_TLI->getRegisterInfo(MF)); MF.getRegInfo().setMapping(NVPTX::NVPTX_MEM, NVTX_TLI->getMemoryInfo(MF)); MF.getRegInfo().setMapping(NVPTX::NVPTX_ZERO, NVTX_TLI->getZeroReg(MF)); MF.getRegInfo().setMapping(NVPTX::NVPTX_ONE, NVTX_TLI->getOneReg(MF)); NVPTX_MFI->setNumHWRegs(TLI->getNumRegs()); NVPTX_MFI->setScratchSize(TLI->getScratchMemSize(&MF)); 
NVPTX_MFI->setMaxBRegs(NVPTX_MFI->useAllBRegs() ? TLI->getMaxBRegs() : NVPTX_MFI->getNumBRegs()); NVPTX_MFI->finalizeLowering(); }" LLVM,NVPTX,307,"std::string getPassName() { return ""NVPTX Proxy Register Instruction Erasure""; }" LLVM,NVPTX,308,"void runOnMachineFunction(MachineFunction &MF) { MachineBasicBlock &MBB = MF.front(); MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *I1RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegI1); const TargetRegisterClass *I16RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegI16); const TargetRegisterClass *I32RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegI32); const TargetRegisterClass *I64RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegI64); const TargetRegisterClass *F16RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegF16); const TargetRegisterClass *F16x2RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegF16x2); const TargetRegisterClass *F32RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegF32); const TargetRegisterClass *F64RC = MRI.getTargetRegisterInfo()->getRegClass(NVPTX::ProxyRegF64); for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) { MachineInstr &MI = *I; for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (MO.isReg()) { unsigned Reg = MO.getReg(); if (NVPTX::ProxyRegI1.contains(Reg)) { MO.setReg(MRI.createVirtualRegister(I1RC)); } else if (NVPTX::ProxyRegI16.contains(Reg)) { MO.setReg(MRI.createVirtualRegister(I16RC)); }" LLVM,NVPTX,309,"std::string getName(const llvm::Function &F, llvm::StringRef TargetID) { if (TargetID != ""NVPTX"") { return """"; } llvm::BasicBlock &BB = F.getEntryBlock(); llvm::LoopInfoEx &LI = llvm::getAnalysis(F); const llvm::DenseMap *VFMap = LI.getVectorizationFactor(); const llvm::DenseMap *UFMap = LI.getUnrollFactor(); std::vector VFList; std::vector UFList; for (llvm::Loop *L : LI) { auto VFIt = VFMap->find(L); auto UFIt = UFMap->find(L); if (VFIt != VFMap->end()) { VFList.push_back(VFIt->second); } if (UFIt != UFMap->end()) { UFList.push_back(UFIt->second); } } std::string OutputStr = ""Plan name: "" + F.getName().str() + "" | VFs: ""; for (auto VF : VFList) { OutputStr += std::to_string(VF) + "" ""; } OutputStr += ""| UFs: ""; for (auto UF : UFList) { OutputStr += std::to_string(UF) + "" ""; } return OutputStr; }" LLVM,NVPTX,310," bool &ChangesMade) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); unsigned FrameReg = TRI->getFrameRegister(MF); MachineBasicBlock &MBB = MF.front(); MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); while (I != E) { MachineInstr &MI = *I; bool IsCall = MI.isCall(); bool IsStore = MI.getOpcode() == NVPTX::ST; int FrameNDXOperand = MI.findRegisterUseOperandIdx(FrameReg, false); if (FrameNDXOperand != -1) { MachineOperand &FrameNDX = MI.getOperand(FrameNDXOperand); FrameNDX.ChangeToImmediate(0); if (IsCall) { MachineOperand &MNCC = MI.getOperand(FrameNDXOperand - 1); if (MNCC.isFI()) { MNCC.ChangeToImmediate(0); } } if (IsStore) { MachineOperand &StoreOperand = MI.getOperand(FrameNDXOperand + 1); if (StoreOperand.isFI()) { StoreOperand.ChangeToImmediate(0); } } } ++I; } ChangesMade = true; }" LLVM,NVPTX,311, LLVM,NVPTX,312,BitVector getReservedRegs() { BitVector ReservedRegs(NVPTX::NUM_TARGET_REGS); ReservedRegs.set(NVPTX::ENVREG0); ReservedRegs.set(NVPTX::ENVREG31); ReservedRegs.set(NVPTX::VRFrame32); ReservedRegs.set(NVPTX::VRFrameLocal32); 
ReservedRegs.set(NVPTX::VRFrame64); ReservedRegs.set(NVPTX::VRFrameLocal64); ReservedRegs.set(NVPTX::VRDepot); return ReservedRegs; } LLVM,NVPTX,313,"void getCalleeSavedRegs(const MachineFunction &MF, BitVector &SavedRegs, const TargetRegisterInfo *TRI) { const TargetMachine &TM = MF.getTarget(); const uint32_t CalleeSavedRegs[] = {NVPTX::NVPTX, NVPTX::NVPTX, 0}; const uint32_t *CalleeSavedRegsBegin = CalleeSavedRegs; const uint32_t *CalleeSavedRegsEnd = CalleeSavedRegs + array_lengthof(CalleeSavedRegs); for (uint32_t RegNum : make_range(CalleeSavedRegsBegin, CalleeSavedRegsEnd)) { SavedRegs.set(RegNum); } }" LLVM,NVPTX,314,"int getDwarfRegNum(unsigned int regNum, unsigned int target) { if (target != NVPTX) { return -1; } if (regNum == 0) { return NVPTX; } if (regNum >= 1 && regNum <= 31) { return regNum + NVPTX; } return -1; } int main() { int dwarfRegNum = getDwarfRegNum(10, NVPTX); if (dwarfRegNum >= 0) { printf(""Dwarf register number for NVPTX register 10 is %d\n"", dwarfRegNum); } else { printf(""Invalid target register number\n""); } return 0; }" LLVM,NVPTX,315,Register getRARegister(void) const { Register RAReg = NVPTX::VR0; return RAReg; } LLVM,NVPTX,316," MachineBasicBlock::iterator MI) const { for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) { MachineInstr &MI = *I; if (MI.getOpcode() == NVPTX::VRFrame) { unsigned FrameIndex = MI.getOperand(1).getIndex(); MI.setDesc(TII->get(NVPTX::IMPLICIT_DEF)); MI.RemoveOperand(1); MI.addReg(0); MI.getOperand(1).setImm(FrameIndex); } ++I; } }" LLVM,NVPTX,317,"std::vector getCalleeSavedRegs(NVPTXTarget const &target, int target_specific_value) { if (target_specific_value != NVPTX,NVPTX,0) { return {}; } std::vector callee_saved_regs; for (MCPhysReg reg : target.getRegisters()) { if (isCalleeSaved(reg, target)) { callee_saved_regs.push_back(reg); } } return callee_saved_regs; } bool isCalleeSaved(MCPhysReg reg, NVPTXTarget const &target) { return !isReturnReg(reg, target) && !isCallerModified(reg, target); }" LLVM,NVPTX,318, LLVM,NVPTX,319,"void eliminateFrameIndex(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI){ if (!MF.getTarget().isNVPTX()) return; MachineFrameInfo &MFI = MF.getFrameInfo(); DebugLoc DL = MI->getDebugLoc(); for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end(); MII != MIE; ++MII){ if (MII->isTargetSpecific() && MII->getOpcode() == NVPTX::VRFrame){ unsigned FrameIdx = MII->getOperand(0).getIndex(); if (MFI.isFrameIndexUsed(FrameIdx)){ int FIdx = MFI.getObjectIndexBegin() + FrameIdx; MFI.replaceFrameIndex(FrameIdx, FIdx); MII->eraseFromParent(); } } } }" LLVM,NVPTX,320, LLVM,NVPTX,321, LLVM,NVPTX,322, LLVM,NVPTX,323,"void eliminateFrameIndex(MachineInstr *MI, unsigned int SPAdj, unsigned int FIOpNum, RegScavenger *RS = nullptr){ if (MI->getNumExplicitOperands() <= FIOpNum || !MI->getOperand(FIOpNum).isFI()) { report_fatal_error(""Instr doesn't have FrameIndex operand!""); } const MachineOperand &FIOp = MI->getOperand(FIOpNum); int FI = FIOp.getIndex(); int FIOffset = PPCFrameLowering::getFrameIndexOffset(MFI, FI); unsigned int FIReg = getFrameRegister(MF); if (FIOffset == 0) { MI->getOperand(FIOpNum).ChangeToRegister(FIReg, false); } else { unsigned int VReg; VReg = MF->getRegInfo().createVirtualRegister(&NVPTX::VRFrame); MachineInstrBuilder MIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), NVPTX::ADD, VReg); MIB.addReg(FIReg); MIB.addImm(FIOffset); MI->getOperand(FIOpNum).ChangeToRegister(VReg, false); } MI->RemoveOperand(FIOpNum); }" LLVM,NVPTX,324," 
MachineBasicBlock::iterator MI) override { for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) { MachineInstr &Inst = *I; if (Inst.hasTargetMemoryOpcode() && Inst.getNumOperands() > 0 && Inst.getOperand(0).isFI()) { unsigned FrameIndexOpIdx = Inst.getOperand(0).getOperandNo(); Register Reg = getRegForFrameIndex(FrameIndexOpIdx, MF); Inst.getOperand(0).ChangeToRegister(Reg, false); } } MachineBasicBlock::eliminateFrameIndex(MF, MBB, MI); }" LLVM,NVPTX,325, LLVM,NVPTX,326,void dummyFunction() { llvm::Register frameReg = getFrameRegister(this); frameReg.dump(); } LLVM,NVPTX,327,"llvm::BitVector llvm::NVPTXRegisterInfo::getReservedRegs(const llvm::TargetRegisterInfo *TRI, const llvm::MachineFunction &MF) { unsigned NumRegs = TRI->getNumRegs(); llvm::BitVector ReservedRegs(NumRegs); for (int Reg = 0; Reg < NumRegs; Reg++) { if (TRI->isReserved(Reg)) { ReservedRegs.set(Reg); } } return ReservedRegs; }" LLVM,NVPTX,328,"StringRef getPassName() { StringRef target = ""NVPTX Replace Image Handles""; return target; }" LLVM,NVPTX,329, LLVM,NVPTX,330,"string getPassName() { return ""NVPTX Replace Image Handles""; }" LLVM,NVPTX,331, LLVM,NVPTX,332,"string getPassName() { return ""NVPTX Replace Image Handles""; }" LLVM,NVPTX,333, LLVM,NVPTX,334, LLVM,NVPTX,335,bool isVirtualSection(Section section) { TargetSpecificValue targetSpecific = section.getTargetSpecificValue(NVPTX); if (targetSpecific.hasObjectFileContent()) { return false; } else { return true; } } LLVM,NVPTX,336, LLVM,NVPTX,337,void getAnalysisUsage(AnalysisUsage &Info) const override { LLVMTargetMachine::getAnalysisUsage(Info); Info.addRequired(); Info.addPreserved(); Info.addRequired(); Info.addRequired(); Info.addRequired(); } LLVM,NVPTX,338,"std::string getPassName() { return ""Split basic blocks at barrier""; }" LLVM,NVPTX,339,"bool runOnFunction(Function *F, PassManager &PM) { bool modified = false; for (BasicBlock &BB : *F) { for (Instruction &I : BB) { if (auto *CI = dyn_cast(&I)) { if (CI->getCalledFunction()->getName() == ""bar_split"") { auto *bar = CallInst::CreateIntrinsic( Intrinsic::nvvm_bar_syncscope0_smem, None, {CI} ); BB.getInstList().insertAfter(CI, bar); BB.splitBasicBlock(bar->getIterator()); modified = true; } if (CI->getCalledFunction()->getName() == ""NVPTX.barrier"") { CI->eraseFromParent(); modified = true; } } } } if (modified) { PM.markFunctionAsModified(F); } return modified; }" LLVM,NVPTX,340,"void initializeSubtargetDependencies(CPU c, TuneCPU t, std::string feature) { const llvm::SubtargetFeatures featureList {feature}; const llvm::Target *parent = llvm::TargetRegistry::lookupTarget(""NVPTX""); assert(parent && ""NVPTX target not found""); llvm::TargetOptions options = getDefaultTargetOptions(); llvm::TargetMachine *tm = parent->createTargetMachine(""sm_20"", """", """", options, llvm::Reloc::Model::Static); tm->createPassConfig(llvm::CodeGenOpt::Level::Aggressive); llvm::CodeModel::Model CM = llvm::CodeModel::Model::Small; llvm::SubtargetFeatures subtargetFeatures(featureList.getString()); const NVPTXSubtarget subtarget = parent->createSubtargetImpl(NVPTX_MC::getFeatureBits(""sm_20"", 0, 32), feature, subtargetFeatures, *tm, """", CM); subtarget.initializeSubtargetDependencies(c, t, feature); }" LLVM,NVPTX,341,"void NVPTXSubtarget(LLVMModule *M) { NVPTXSubtargetData = {NVPTX, NVPTX, NVPTX, NVPTX, NVPTX, 0, 20}; M->addTargetSubtargetInfo( NVPTXCreateTargetSubtargetInfo(NVPTX, NVPTXSubtargetData)); }" LLVM,NVPTX,342,"TargetInstrInfo* getInstrInfo(const NVPTXTargetMachine &TM, 
NVPTXSubtarget &STI) { TargetInstrInfo *TII = new TargetInstrInfo(TM); TII->setNVPTX(STI); return TII; }" LLVM,NVPTX,343,"TargetInstrInfo* getRegisterInfo(const Target &T, const Pass &P) { if (T.getArch() != NVPTX::Arch) return nullptr; const TargetRegisterInfo *TRI = T.getRegisterInfo(); const TargetInstrInfo *TII = T.getInstrInfo(); NVPTX::TargetInstrInfo *TargetInfo = new NVPTX::TargetInstrInfo(TRI); TargetInfo->TII = TII; return TargetInfo; }" LLVM,NVPTX,344,"void initializeSubtargetDependencies(NVPTXSubtarget &ST, const NVPTXTargetMachine &TM, StringRef CPU, StringRef TuneCPU, StringRef FeatureStr) { ST.setCPU(CPU); ST.setTuneCPU(TuneCPU); ST.setFeatureString(FeatureStr); ST.setTargetID(NVPTX::NVPTX); ST.setFeatureString(""""); ST.setTargetSpecificFlag(TargetInfo::CF_NVIDIATARGET); ST.setTargetSpecificFlag(FeatureBitset(NVPTX::FeatureSet_SM_20)); ST.dataSectionAlignment = 32; ST.toplevelSchedulingPreference = Sched::Source; }" LLVM,NVPTX,345,"void NVPTXSubtarget(Module *M) { NVPTXSubtarget *NVPTXTarget = new NVPTXSubtarget(); NVPTXTarget->setNVPTXVersion(""NVPTX""); NVPTXTarget->setTargetID(""NVPTX""); NVPTXTarget->setFeatureString(""NVPTX,NVPTX,NVPTX,NVPTX,NVPTX,0,20""); M->setSubtarget(NVPTXTarget); return NVPTXTarget; }" LLVM,NVPTX,346,"void NVPTXSubtarget(Module& M) { M.Arch = triple::nvptx; M.CPU = ""nvptx""; M.Triple = llvm::Triple::normalize(""nvptx;32""); M.DataLayout = ""e-p:32:32""; M.PointerAlign = 32; M.Option = ""nvptx;nvptx;nvvm;0;10;sm_10;we are not using FeatureStr;0;31""; }" LLVM,NVPTX,347,"NVPTXSubtarget NVPTXSubtarget(Module &M) { TargetOptions Options; NVPTXSubtarget Subtarget(M, std::string(M.getTargetTriple().getArchName()), std::string(M.getTargetTriple().getTriple())); Subtarget.is64Bit() = (M.getDataLayout().getPointerSize(0) == 8); Subtarget.hasDRISC() = strcmp(M.getTargetTriple().getArchName(), ""NVPTX64"") == 0 || strcmp(M.getTargetTriple().getArchName(), ""NVPTX"") == 0; Subtarget.hasImageHandles() = true; Subtarget.hasAtomics() = true; Subtarget.hasFMA() = true; Subtarget.hasLDG() = true; Subtarget.hasSM75() = false; Subtarget.GVAlignInBits = 128; Subtarget.SharedAlignInBits = 32; Subtarget.LocalAlignInBits = 32; Subtarget.GenericAlignInBits = 32; Subtarget.StackAlignment = 256; Subtarget.FunctionAlignment = 1; Subtarget.enableExtPipes() = true; Subtarget.enableMemoryPredication() = true; Subtarget.features().AddFeature(""ptx50""); Subtarget.features().AddFeature(""width64""); Subtarget.features().AddFeature(""stride128""); Subtarget.sm_version = 75; Subtarget.setTargetAEmm( Subtarget.hasSM50(), Subtarget.hasSM61(), Subtarget.hasSM70(), Subtarget.hasSM75(), 0, 20); return Subtarget; }" LLVM,NVPTX,348,"DataLayout getDataLayout() { string targetLayout = ""NVPTX,e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"",""f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"",""n16:32:64"",""e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"",""f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"",""n16:32:64""; DataLayout dl = DataLayout(targetLayout); return dl; }" LLVM,NVPTX,349,"namespace llvm { class NVPTXSubtarget : public NVPTXGenSubtargetInfo { virtual void anchor(); NVPTXSubtarget &operator=(const NVPTXSubtarget &) = delete; public: NVPTXSubtarget() : NVPTXGenSubtargetInfo(NVPTX()); { TargetTriple = Triple(""nvptx-unknown-unknown""); Is64Bit = false; PointerABIAlign = 32; DataLayoutString = ""e-S32-p:32:32:32-S0-i64:64-v16:16-v32:32-n16:32:64""; Textures = true; SizeInBits = 32; UseCUDA = true; TargetABI = 
TargetMachine::CodeGenFileType::CGFT_ObjectFile; ComputeCapability = ""sm_20""; FeatureString = ""we are not using FeatureStr""; CodeModel = NVPTX::NVCL; CPU = ""0""; MaxRegisterCount = 31; }" LLVM,NVPTX,350,"void initializeSubtargetDependencies(CPU, TuneCPU, FeatureString) { Subtarget->setTargetArch(NVPTX); Subtarget->setCPU(TuneCPU); Subtarget->ToggleFeature(""sm_20"", true); Subtarget->setTargetVersion(0); Subtarget->setTargetAddressSpace(32); }" LLVM,NVPTX,351," InstrItins(NVPTXInstrItineraryData) { std::string CPUName = CPU; if (CPUName.empty()) CPUName = ""sm_10""; ParseSubtargetFeatures(CPUName, FS); } void NVPTXSubtarget::ParseSubtargetFeatures(StringRef CPUName, StringRef FS) { if (FS.empty()) FS = ""+ptx20,+ptx30,+ptx40""; std::string TargetFS = ""ptx20,ptx30,ptx40""; if (CPUName == ""ptx"") { if (!FeatureStr.empty()) TargetFS = ""+"" + TargetFS; ParseSubtargetFeatures(TargetFS, """"); return; } if (CPUName == ""ptx20"") InstrInfo = &NVPTXInstrInfo::ptx20InstrInfo; else if (CPUName == ""ptx30"") InstrInfo = &NVPTXInstrInfo::ptx30InstrInfo; else InstrInfo = &NVPTXInstrInfo::ptx40InstrInfo; InitCPUSchedModel(CPUName); ParseSubtargetFeatures(TargetFS, FS); }" LLVM,NVPTX,352,"void initializeSubtargetDependencies(CPU, TuneCPU, featureStr) { LLVMInitializeNVPTXTargetInfo(); LLVMInitializeNVPTXTarget(); LLVMInitializeNVPTXTargetMC(); StringRef CPU(CPU); StringRef TuneCPU(TuneCPU); StringRef featureStr(featureStr); NVPTX::initializeSubtargetDependencies(CPU, TuneCPU, featureStr); }" LLVM,NVPTX,353,"void NVPTXSubtarget(Module *M) { NVPTXTargetMachine TM; NVPTXSubtarget STI; M->setTargetTriple(""nvptx64-nvidia-cuda""); STI.NVPTX::PTXVersion = NVPTX::PTX20; STI.NVPTX::PTXFeatureVersion = 0; STI.NVPTX::TargetCPU = NVPTX::SM_20; STI.NVPTX::TargetABI = NVPTX::NVCL; STI.NVPTX::TargetType = NVPTX::CUDA; TM.initTargetMachine(); M->setTargetMachine(TM); llvm::errs() << ""PTXVersion: "" << STI.NVPTX::PTXVersion << ""\n""; llvm::errs() << ""PTXFeatureVersion: "" << STI.NVPTX::PTXFeatureVersion << ""\n""; llvm::errs() << ""TargetCPU: "" << STI.NVPTX::TargetCPU << ""\n""; llvm::errs() << ""TargetABI: "" << STI.NVPTX::TargetABI << ""\n""; llvm::errs() << ""TargetType: "" << STI.NVPTX::TargetType << ""\n""; }" LLVM,NVPTX,354,"DataLayout getDataLayout(Module *M) { TargetData *TD = M->getTargetData(); std::string DataLayoutString = """"; DataLayoutString += ""ftype32:32-v1:32:32-v2:32:32-v4:32:32-v8:32:32-v16:32:32-v32:32:32""; DataLayoutString += ""-n32:64""; DataLayoutString += ""-align:128""; DataLayoutString += ""-itas:0:32""; DataLayoutString += ""-p0:32:32""; DataLayout DL(DataLayoutString); return DL; }" LLVM,NVPTX,355,function NVPTXSubtarget (module) { this.NVPTX = module this.NVPTX = module this.NVPTX = module this.NVPTX = module this.NVPTX = module this.NVPTX = 0 this.NVPTX = 20 } LLVM,NVPTX,356, GPU(M.getTargetTriple().isOSBinFormatMachO() ? NVPTX::NVCL : NVPTX::CUDA) { ... } LLVM,NVPTX,357,"DataLayout getDataLayout(Module &M, const TargetOptions &Options) { Triple TargetTriple = Triple(M.getTargetTriple()); std::string DataLayoutString = createNVPTXDataLayoutString(TargetTriple); return DataLayout(DataLayoutString); }" LLVM,NVPTX,358,"TargetInstrInfo &getInstrInfo() const { return static_cast(*this->SubtargetInfoT::getInstrInfo()); } ``` The function should take in the NVPTX backend as a parameter, and return a reference to the TargetInstrInfo for that backend. 
This function can be defined as a member function of the NVPTX TargetMachine class, or a standalone function in the NVPTXTargetMachine.cpp file. The purpose of this function is to provide access to the TargetInstrInfo for any code generation tasks related to the NVPTX backend. This includes retrieving information about individual instructions, as well as generating new instructions for the backend. The function should first cast the NVPTX backend to a TargetMachine class, and then use its getInstrInfo() function to retrieve the TargetInstrInfo for the NVPTX backend. This TargetInstrInfo object should then be returned by reference. In summary, the getInstrInfo function for NVPTX backend should have the following structure: ``` TargetInstrInfo &getInstrInfo(NVPTXTargetMachine &TM) const { return static_cast(*TM.getSubtargetImpl()->getInstrInfo()); }" LLVM,NVPTX,359,"TargetInstrInfo* getRegisterInfo(TargetMachine &TM) { return new NVPTXInstrInfo(NVPTX::getRegisterInfo(), TM); }" LLVM,NVPTX,360,"NVPTXSubtarget* NVPTXSubtarget(Module& M, NVPTX::TargetISA ISA) { NVPTXSubtarget* Subtarget = new NVPTXSubtarget(); initializeSubtargetDependencies(*Subtarget, M, ISA); Subtarget->setVersion(NVPTX::NVPTX); Subtarget->setTarget(NVPTX::NVPTX); Subtarget->setFeatureBits(0); Subtarget->setAlignementBits(20); Subtarget->setGPUType(NVPTX::NVCL); Subtarget->setArch(NVPTX::CUDA); return Subtarget; }" LLVM,NVPTX,361,void addFastRegAlloc(NVPTX& Target) { Target.addPass(createTargetTransformInfoWrapperPass(Target.getTM())); Target.addPass(createAlwaysInlinerLegacyPass()); Target.addPass(createNVVMReflectPass()); } LLVM,NVPTX,362, LLVM,NVPTX,363, LLVM,NVPTX,364,"void addOptimizedRegAlloc(TargetMachine &TM, PassManagerBase &PM) { const NVPTXSubtarget &ST = TM.getSubtarget(); PM.add(createNVPTXRegAllocPass(&ST)); PM.add(createMachineSchedulerPass()); PM.add(createStackSlotColoringPass()); PM.add(createNVPTXPostRAPass(&ST)); }" LLVM,NVPTX,365, const NVPTXTargetMachine &TM) { PM.add(createNVPTXTargetTransformInfoPass(TM)); PM.add(createNVPTXPostRAPass()); PM.add(createNVPTXPrologEpilogPass(TM)); PM.add(createNVPTXInsertChecksPass(TM)); PM.add(createNVPTXInsertNVVMIntrinsicsPass(TM)); PM.add(createNVPTXWrapNVVMIntrinsicsPass(TM)); PM.add(new NVPTXTargetMachine(TM)); } LLVM,NVPTX,366,"void addPreRegAlloc(llvm::FunctionPassManager &FPM, llvm::TargetMachine &TM, llvm::TargetIRAnalysis &TIRA) { llvm::FunctionPass *CustomPass = createCustomPass(); FPM.add(CustomPass); }" LLVM,NVPTX,367,void addRegAssignAndRewriteFast() { MachineFunction &MF = getMachineFunction(); MachineRegisterInfo& MRI = MF.getRegInfo(); const NVPTXSubtarget& ST = MF.getSubtarget(); MF.addPass(createNVPTXAssignRegistersPass(&NVPTX::Main_64RegClass)); MF.addPass(createNVPTXSimpleRegisterRewriter(ST)); if (ST.isCPURewriteEnabled()) MF.addPass(createNVPTXRegisterCopyPropagationPass()); if (ST.isCSEEnabled()) MF.addPass(createNVPTXRegisterCSEPass()); MF.addPass(createNVPTXScavengerPass()); MF.addPass(createNVPTXCSRPass()); MF.addPass(createNVPTXLiveRegisterInfo(ST)); return MF; } LLVM,NVPTX,368,"TargetPassConfig *createPassConfig(const TargetMachine &TM, PassManagerBase &PM) { TargetPassConfig *TPC = new TargetPassConfig(TM, PM); TPC->addPass(createNVPTXISelDag(TM)); TPC->addPass(createNVPTXExpandPseudoPass(TM)); TPC->addPass(createNVPTXLowerAggrCopiesPass(TM)); TPC->setOpt(&TM.getOptLevel()); return TPC; }" LLVM,NVPTX,369, LLVM,NVPTX,370,"void registerPassBuilderCallbacks() { PassBuilder &PB = getPassBuilder(); PB.registerPipelineParsingCallback( 
[](StringRef Name, ModulePassManager &MPM, ArrayRef) { if (Name == ""nvptx"") { MPM.addPass(createMyNVPTXPass()); return true; } return false; }); PB.registerPipelineParsingCallback( [](StringRef Name, ModulePassManager &MPM, ArrayRef) { if (Name == ""nvvm-reflect"") { MPM.addPass(createNVVMReflectPass()); return true; } return false; }); PB.registerPipelineParsingCallback( [](StringRef Name, ModulePassManager &MPM, ArrayRef) { if (Name == ""nvvm-intr-range"") { MPM.addPass(createNVVMIntrinsicRangePass()); return true; } return false; }); PB.registerLoopAnalyses([&PB](const PassBuilder::LoopAnalysesTy &LAM) { PB.registerLoopAnalysis(NVPTXTargetMachine::AM, LAM); PB.registerLoopAnalysis(NVVMTargetMachine::AM, LAM); }); }" LLVM,NVPTX,371,"void addFastRegAlloc() { StringRef TargetID = ""NVPTX""; StringRef Arch = ""NVPTX""; StringRef Message = ""NVPTX uses no regalloc!""; addPass(TargetID, Arch, Message); }" LLVM,NVPTX,372,"void addInstSelector() { InstructionSelector *NVPTX_InstSelector = new InstructionSelector(NVPTX); PM.addPass(NVPTX_InstSelector); printf(""Instruction Selector Pass for NVPTX has been successfully installed.\n""); }" LLVM,NVPTX,373,void addIRPasses() { addMachineIndependentOptimizationPass(); addNVPTXOptimizationPass(); addNVPTXCodeGenerationPass(); addNVPTXVectorizationPass(); addNVPTXBarrierRemovalPass(); addNVPTXLoopUnrollingPass(); addNVPTXIntrinsicOptimizerPass(); addNVPTXFloatingPointOptimizationPass(); addNVPTXSoftwarePipeliningPass(); } LLVM,NVPTX,374, LLVM,NVPTX,375,void addOptimizedRegAlloc(TargetPassConfig &PassConfig) { PassConfig->addPass(new NVPTXTargetMachineSignature()); PassConfig->addPass(createNVPTXFixupLivenessPass()); PassConfig->addPass(createNVPTXLowerLoadStorePass()); PassConfig->addPass(createNoRegAllocPass()); PassConfig->addPass(createStackSlotColoringPass()); } LLVM,NVPTX,376,void addPostRegAlloc(llvm::PassManager &PM) { llvm::FunctionPass *NVPTXP1 = new llvm::SomeNVPTXP1Pass(); llvm::FunctionPass *NVPTXP2 = new llvm::SomeNVPTXP2Pass(); llvm::FunctionPass *NVPTXP3 = new llvm::SomeNVPTXP3Pass(); llvm::FunctionPass *NVPTXP4 = new llvm::SomeNVPTXP4Pass(); PM.add(NVPTXP1); PM.add(NVPTXP2); PM.add(NVPTXP3); PM.add(NVPTXP4); PM.run(*M); } LLVM,NVPTX,377,PassConfig *createPassConfig(TargetMachine &TM) { auto *Conf = new PassConfig(TM); Conf->addPass(createNVPTXISelDag(Conf)); Conf->addPass(createNVPTXPeephole(Conf)); MCAsmInfo *MAI = new NVTMCAsmInfo(TM.getTargetTriple()); Conf->setMCAsmInfo(MAI); Conf->addMachinePasses(createNVPTXLowerThreadPrivate(Conf)); return Conf; } LLVM,NVPTX,378,"RegisterRegAlloc *createTargetRegisterAllocator(Module *M, TargetMachine &TM, PassManagerBase &PM) { NVPTXTargetMachine &NVPTXTM = static_cast(TM); const NVPTXSubtarget &ST = NVPTXTM.getSubtarget(*M); return new NVPTXRegisterAllocator(ST, M->getDataLayout()); }" LLVM,NVPTX,379,"TargetIRAnalysis* getTargetIRAnalysis(const TargetMachine& TM, const TargetLibraryInfo& TLI) { if (!TM.getTargetTriple().isNVPTX()) { return nullptr; } LLVMContext &Context = TM.getContext(); TargetIRAnalysis *TIA = new TargetIRAnalysis(Context); TIA->setTargetMachine(&TM); TIA->setTargetLibraryInfo(&TLI); TIA->setRelocationModel(TM.getRelocationModel()); TIA->setCodeModel(TM.getCodeModel()); return TIA; }" LLVM,NVPTX,380,"void addInstSelector(Module &M) { TargetMachine *TM = nullptr; std::string Err; const Target *TheTarget = TargetRegistry::lookupTarget(""nvptx64-nvidia-cuda"", Err); TM = TheTarget->createTargetMachine(""sm_XX"", """", """", TargetOptions(), None); PassRegistry *Registry = 
PassRegistry::getPassRegistry(); initializeTargetPassConfig(*Registry); TargetLibraryInfoWrapperPass *TLIWP = new TargetLibraryInfoWrapperPass(); TargetLibraryInfo *TLI = &TLIWP->getTLI(TM->getTargetTriple()); PassManagerBuilder Builder; Builder.LibraryInfo = TLI; Builder.Inliner = createFunctionInliningPass(); Builder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly, addOptimizationPasses); MachinePassRegistry *MPR = MachinePassRegistry::getPassRegistry(); const MCAsmInfo *MAI = TM->getMCAsmInfo(); const MCInstrInfo *MCII = TM->getMCInstrInfo(); const MCRegisterInfo *MRI = TM->getMCRegisterInfo(); const MCSubtargetInfo *STI = TM->getMCSubtargetInfo(); const MCCodeGenInfo *CGI = TM->getMCCodeGenInfo(); MCContext *Ctx = new MCContext(MAI, MRI, nullptr); MCTargetOptions MCOptions; MCOptions.AsmVerbose = false; MCOptions.MCInstPrintOpersMem = false; std::unique_ptr Str; if (CGI->ReverseAsmComments) { Str.reset(TM->getTarget().createAsmStreamer(*Ctx, Out, true, false, false)); } else { Str.reset(TM->getTarget().createCivilianStreamer(*Ctx, Out, false, true)); }" LLVM,NVPTX,381,NVPTXObjFileLowering getObjFileLowering(const TargetMachine &TM) { NVPTXObjFileLowering TLO; TLO.targetDataSize = TM.getDataLayout().getPointerSize(); TLO.targetAlign = TM.getDataLayout().getPointerABIAlignment(0); TLO.isLittleEndian = TM.getDataLayout().isLittleEndian(); TLO.hasIdentDirective = true; TLO.canRelaxRelocations = true; return TLO; } LLVM,NVPTX,382,NVPTXTargetMachine::getSubtargetImpl(const Function &F) const { return *this; } LLVM,NVPTX,383,"void addIRPasses(TargetPassConfig &PassConfig, bool IsOptimize) { PassConfig.addPass(createNVPTXIRLoweringPass()); PassConfig.addPass(createNVPTXTargetLibraryInfoPass()); PassConfig.addPass(createNVPTXDAGToDAGISelPass()); PassConfig.addPass(createNVPTXCodeGenPreparePass()); PassConfig.addPass(createNVPTXISelDag(PassConfig)); PassConfig.addPass(createNVPTXRegBankSelectPass()); PassConfig.addPass(createNVPTXILPInserter()); if (IsOptimize) { PassConfig.addPass(createNVPTXMCCodeEmitterPass()); } }" LLVM,NVPTX,384, LLVM,NVPTX,385,"void addInstSelector(LLVMContext& Ctx, Target& T) { if (T.getArch() != Triple::nvptx) { errs() << ""Error: This function is only meant for NVPTX backend.\n""; return; } RegisterBank* RB = new RegisterBank(""NVPTX"", T); RB->addRegClasses(); InstructionSelector* IS = new InstructionSelector(T, RB); IS->addTargetSpecificValue(TargetSpecificValue::NVPTX); IS->addTargetSpecificValue(TargetSpecificValue::NVPTX); IS->addTargetSpecificValue(TargetSpecificValue::NVPTX); IS->addTargetSpecificValue(TargetSpecificValue::NVPTX); PassManagerBuilder PMB; PMB.addExtension(PassManagerBuilder::EP_InstructionSelector, [&](PassManagerBuilder &Builder, PassManagerBase &PM) { PM.add(IS); return true; }); ModulePassManager PM; PM.setTarget(T); PM.add(IS); PM.run(Mod); errs() << ""NVPTX machine code:\n""; T.printMachineCode(Mod, errs()); }" LLVM,NVPTX,386,"void addPassesToEmitMC(PassManager &PM, TargetMachine &TM, raw_pwrite_stream &OS, raw_pwrite_stream &DWOOS, CodeGenFileType FileType, bool PreserveLocals) { PM.add(createGCInfoDeleter()); PM.add(createGCInfoRecorder(TM)); PM.add(createGCInfoEmitter(TM)); PM.add(createNVPVisitorPass()); PM.add(createNVVMReflectPass()); PM.add(createNVPTXISelDag(TM)); PM.add(createNVPTXPeepholeOptPass()); if (TM.addPassesToEmitFile(PM, OS, DWOOS, FileType, PreserveLocals)) report_fatal_error(""Target machine does not support emission of this file type.""); PM.add(createBarrierNoopPass()); PM.doFinalization(); }" 
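The pass-configuration rows above and below approximate, in many variations, the same idea: a TargetPassConfig subclass queues target-specific IR passes before the generic pipeline. As orientation only (not the NVPTX implementation, and not part of the dataset), here is a minimal sketch of that pattern; the lowering-pass helper is a hypothetical placeholder, and the TargetPassConfig constructor signature varies across LLVM releases.
```cpp
// Minimal illustrative sketch of the TargetPassConfig pattern the nearby rows
// approximate. createExampleLoweringPass() is a HYPOTHETICAL stand-in for a
// backend's own pass constructor (declaration only); everything else uses the
// standard TargetPassConfig hooks.
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

namespace {
llvm::FunctionPass *createExampleLoweringPass(); // hypothetical, for illustration

class ExamplePassConfig : public llvm::TargetPassConfig {
public:
  ExamplePassConfig(llvm::LLVMTargetMachine &TM, llvm::legacy::PassManagerBase &PM)
      : llvm::TargetPassConfig(TM, PM) {}

  void addIRPasses() override {
    // Queue target-specific IR lowering first...
    addPass(createExampleLoweringPass());
    // ...then delegate so the generic IR pipeline still runs afterwards.
    llvm::TargetPassConfig::addIRPasses();
  }
};
} // end anonymous namespace
```
The design point shared by the rows here is the ordering: the override adds the target's IR passes and then calls the base-class hook rather than replacing it.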
LLVM,NVPTX,387,"std::string getDataLayout() { if (Module->getTargetTriple().find(""nvptx"") == std::string::npos) { return """"; } const llvm::DataLayout &DL = Module->getDataLayout(); std::string DLString = DL.getStringRepresentation(); DLString += ""-nvptx""; if (DL.isBigEndian()) { DLString += ""-eb""; } else { DLString += ""-el""; } return DLString; }" LLVM,NVPTX,388,"int TargetInstrInfo * getInstrInfo() { TargetInstrInfo *NVPTXInstrInfo = new TargetInstrInfo(); NVPTXInstrInfo->setTarget(Triple(""nvptx"")); return NVPTXInstrInfo; }" LLVM,NVPTX,389, LLVM,NVPTX,390,"MRegisterInfo getRegisterInfo(Target target, MCRegisterInfo Abbr) { if (!isa(target)) { report_fatal_error(""target is not of type TargetInstrInfo""); } if (!target->hasNVPTX()) { report_fatal_error(""target does not have NVPTX as a target specific value""); } const PTXInstrInfo *PTXInfo = target->getPTXInfo(); MRegisterInfo RegInfo; RegInfo.NumRegs = PTXInfo->getNumRegs(); for (int i = 0; i < RegInfo.NumRegs; i++) { const MCRegisterClass &RC = PTXInfo->getRegisterClass(i); RegInfo.Regs[i].Name = RC.getName(); RegInfo.Regs[i].Size = RC.getSize(); }" LLVM,NVPTX,391,const TargetSubtargetInfo &getSubtargetImpl() const override { return Subtarget; } LLVM,NVPTX,392," bool DisableVerify = true) { const NVPTXTargetMachine &NTM = static_cast(TM); NVPTXPassConfig *TPC = new NVPTXPassConfig(NTM); MCStreamer *Streamer = 0; if (NTM.getTargetTriple().getArch() == Triple::nvptx64) { Streamer = llvm::createNVPTXSTMCStreamer(PM, OutContext, OutStream); } else { Streamer = llvm::createNVPTXAsmStreamer(PM, OutContext, OutStream); } TPC->setMCStreamer(Streamer); PM.add(TPC->createNVPTXISelDag(NTM)); PM.add(TPC->createNVPTXLowerKernelArgsPass(NTM)); PM.add(TPC->createNVPTXCopyKernelArgsPass(NTM)); PM.add(TPC->createNVPTXFrameLoweringPass(NTM)); PM.add(TPC->createNVPTXAsmPrinterPass(NTM, OutStream)); PM.add(TPC->createNVPTXIPRARegAllocPass(NTM)); PM.add(TPC->createNVPTXCodeGenPreparePass(NTM)); PM.add(TPC->createNVPTXWarpSizePass(NTM)); if (!DisableVerify) PM.add(createVerifierPass()); PM.add(createCFGSimplificationPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createMachineCodeEmissionPass(Streamer, DisableVerify)); }" LLVM,NVPTX,393,"llvm::PassConfig createPassConfig() { llvm::PassConfig passConfig; passConfig.addTargetPasses(NVPTX, NVPTX, NVPTX); return passConfig; }" LLVM,NVPTX,394,bool isMachineVerifierClean() { if (Target.getID() == NVPTX) { return true; } return false; } LLVM,NVPTX,395,"void addIRPasses(TargetPassConfig *TargetPassConfig) { TargetMachine *TM = &TargetPassConfig->getTM(); Triple TargetTriple = TM->getTargetTriple(); if (TargetTriple.getArch() != Triple::nvptx) { return; } PassBuilder &PB = TargetPassConfig->getOptPipeline(); LoopAnalysisManager &LAM = TargetPassConfig->getLAM(); FunctionAnalysisManager &FAM = TargetPassConfig->getFAM(); FunctionPassManager FPM; FPM.addPass(LoopSimplifyPass()); FPM.addPass(LICMPass()); FPM.addPass(NewGVNPass()); FPM.addPass(LazyValueInfoCacheAnalysis()); FPM.addPass(TwoAddressInstructionPass()); FPM.addPass(LateSimplifyPass()); PB.addExtensionsToPM(FPM); TargetPassConfig->addISelPrepare(); PB.addIRPasses(); TargetPassConfig->addPassesToHandleExceptions(); TargetPassConfig->addEarlyAsPossiblePasses(); FPM.addPass(FunctionPassManager()); TargetPassConfig->addMachinePasses(); TargetPassConfig->addISelFinalize(); TargetPassConfig->addMachineLateOptimization(); PB.addMachinePasses(); FPM.run(*TargetPassConfig->getFunctionPassManager(), *TargetPassConfig->getAnalysisManager()); }" 
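Several nearby rows build NVPTX-style data-layout strings by hand. As a small self-contained illustration (not taken from the dataset), the snippet below wraps one such 32-bit layout string in llvm::DataLayout and queries it; the literal string is assumed from the surrounding entries and is not an authoritative NVPTX layout.
```cpp
// Illustrative only -- not from the dataset. Parses an NVPTX-style 32-bit
// data-layout string and prints a few of the properties it encodes.
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/raw_ostream.h"

void dumpExampleLayout() {
  llvm::DataLayout DL("e-p:32:32:32-i64:64-v16:16-v32:32-n16:32:64");
  llvm::outs() << "little-endian: " << (DL.isLittleEndian() ? "yes" : "no") << "\n";
  llvm::outs() << "pointer bits (addrspace 0): " << DL.getPointerSizeInBits(0) << "\n";
  llvm::outs() << "layout string: " << DL.getStringRepresentation() << "\n";
}
```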
LLVM,NVPTX,396, LLVM,NVPTX,397,"TargetIRAnalysis *getTargetIRAnalysis(StringRef nvptx1, StringRef nvptx2, StringRef nvptx3) { if (!TargetMachine::isNVPTX(TargetTriple)) return nullptr; return new TargetIRAnalysis(nvptx1, nvptx2, nvptx3); }" LLVM,NVPTX,398,"void getRegisterInfo(const TargetInstrInfo *TII) { TII->setTargetSpecificValue(NVPTX); const MRegisterInfo *MRI = TII->getRegisterInfo(); TII->setRegisterInfo(NVPTX, MRI); }" LLVM,NVPTX,399,"void addInstSelector(FunctionPassManager &FPM, const TargetMachine &TM) { FPM.add(createNVPTXInstructionSelectorPass(TM)); TargetLibraryInfoImpl TLII(TM.getTargetTriple()); FPM.add(new TargetLibraryInfoWrapperPass(TLII)); FPM.add(createBarrierNoopPass()); }" LLVM,NVPTX,400,void addIRPasses(Target *target) { target->addPass(NVPTXAddressSpaceTransformPass()); target->addPass(NVPTXStructLayoutTransformPass()); target->addPass(NVPTXVectorizationPass()); target->addPass(NVPTXInstructionCombiningPass()); target->addPass(NVPTXHighOrderTransformPass()); } LLVM,NVPTX,401,"namespace { class AddPostRegAlloc : public MachineFunctionPass { static char ID; const TargetMachine *TM; public: AddPostRegAlloc(TargetMachine *TM) : MachineFunctionPass(ID), TM(TM) {} bool runOnMachineFunction(MachineFunction &MF) override { bool Changed = false; if (!MF.getFunction().hasFnAttribute(""nvptx"")) { return Changed; } const NVPTXSubtarget &STI = MF.getSubtarget(); const NVPTXInstrInfo *TII = STI.getInstrInfo(); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { if (MI.getOpcode() == NVPTX::NVPTX_FADD32 || MI.getOpcode() == NVPTX::NVPTX_FADD64) { const MCInstrDesc &Desc = MI.getDesc(); MI.setDesc(TII->get(Desc.OpInfo[0].RegClass->hasFP64() ? NVPTX::fadd64 : NVPTX::fadd32)); Changed = true; } } } return Changed; } StringRef getPassName() const override { return ""Add Post Register-Allocation Pass for NVPTX""; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addPreserved(); MachineFunctionPass::getAnalysisUsage(AU); } }; } char AddPostRegAlloc::ID = 0; FunctionPass *llvm::createAddPostRegAlloc(TargetMachine *TM) { return new AddPostRegAlloc(TM); }" LLVM,NVPTX,402,"bool addPreRegAlloc(Module &M, PassManager &PM, TargetMachine &TM) { const Target *T = TM.getTarget(); if (T->getName() != ""nvptx"") { return false; } const NVPTXSubtarget *ST = &TM.getSubtarget(M); const NVPTX::TargetArch Arch = ST->getComputeCapability(); const NVPTX::PTXVersion PTXVer = ST->getPTXVersion(); switch (Arch) { case NVPTX::ARCH_SM_20: if (PTXVer == NVPTX::PTX20) { PM.add(createNVPTXAddresSpaceCastPass()); } else if (PTXVer == NVPTX::PTX30) { PM.add(createNVPTXSSAAlignPass()); PM.add(createNVPTXAddresSpaceCastPass()); } break; case NVPTX::ARCH_SM_30: PM.add(createNVPTXMaterializeNVVMPass()); PM.add(createNVPTXFavorNonGenericAddrSpacesPass()); PM.add(createNVPTXSelectNVPTXRegAllocPass()); break; default: return false; } return true; }" LLVM,NVPTX,403, LLVM,NVPTX,404, LLVM,NVPTX,405,"void addRegAssignmentFast(LLVMPassManagerRef passMgr, bool shouldNotBeUsed = false) { if (shouldNotBeUsed) { errs() << ""Warning: NVPTX should not be used as target specific value!"" << '\n'; } addPassDeadCodeEliminationPass(passMgr); addPassRegisterCoalescer(passMgr); addPassLiveVariables(passMgr); addPassMachineFunctionPrinter(passMgr); }" LLVM,NVPTX,406,"void addIRPasses(Module& M, NVPTX nvptx, NVPTX nvptx, NVPTX nvptx, NVPTX nvptx, NVPTX nvptx) { PassManagerBuilder PMBuilder; PMBuilder.DisableTailCalls = true; PMBuilder.Inliner = createNVPTXFatbinInliningPass(); PMBuilder.VerifyInput = true; 
PMBuilder.VerifyOutput = true; PMBuilder.populateModulePassManager(MPM); PMBuilder.populateFunctionPassManager(FPM); PMBuilder.populateLTOPassManager(LPM); MPM.run(M); FPM.doInitialization(); for (Function &F : M) if (!F.isDeclaration()) FPM.run(F); FPM.doFinalization(); LPM.run(*M.getModule()); PMBuilder.OptLevel = 3; PMBuilder.populateFunctionPassManager(FPM); FPM.doInitialization(); for (Function &F : M) if (!F.isDeclaration()) FPM.run(F); FPM.doFinalization(); }" LLVM,NVPTX,407,void addIRPasses(){ FunctionPassManager PM = FunctionPassManager(Module); PM.add(createNVPTXIRTransform1()); PM.add(createNVPTXIRTransform2()); PM.add(createNVPTXIRTransform3()); PM.add(createNVPTXIRTransform4()); PM.add(createNVPTXIRTransform5()); PM.add(createNVPTXIRTransform6()); PM.add(createNVPTXIRTransform7()); PM.add(createNVPTXIRTransform8()); PM.run(); PassManager PM2 = PassManager(); PM2.add(createNVPTXPass1()); PM2.add(createNVPTXPass2()); PM2.add(createNVPTXPass3()); PM2.add(createNVPTXPass4()); PM2.add(createNVPTXPass5()); PM2.add(createNVPTXPass6()); PM2.add(createNVPTXPass7()); PM2.add(createNVPTXPass8()); PM.run(*Module); } LLVM,NVPTX,408,"TargetTransformInfo *getTargetTransformInfo(const Triple &TT) { if (TT.isNVPTX()) { NVPTXTargetMachine *TM = (NVPTXTargetMachine *)TM; return new NVPTXTargetTransformInfo(TM->getSubtargetImpl()); } return NULL; } TargetTransformInfo *TTI = getTargetTransformInfo(Triple(""nvptx--nvidia-cuda"")); if (!TTI) { errs() << ""Target is not NVPTX\n""; return -1; }" LLVM,NVPTX,409, LLVM,NVPTX,410,"TargetIRAnalysis getTargetIRAnalysis(Target &target, uint64_t nvptxTargetValue, Triple nvptxTargetTriple) { if (nvptxTargetTriple.getArch() != Triple::nvptx) { report_fatal_error(""Invalid target triple!""); } return NVPTXIRAnalysis(target, nvptxTargetValue); }" LLVM,NVPTX,411,"void registerPassBuilderCallbacks() { LLVMAddTargetSpecificNVPTXCallbacks(getNVPTXPassBuilderCallbacks, ""NVPTX"", ""NVPTX"", ""nvvm-reflect"", ""nvvm-intr-range""); } The function above is named ""registerPassBuilderCallbacks"". It calls the LLVMAddTargetSpecificNVPTXCallbacks function which takes in the callback function called ""getNVPTXPassBuilderCallbacks"" as its first argument and the target specific values as the remaining arguments. In this case, the target specific values are ""NVPTX"", ""NVPTX"", ""nvvm-reflect"", ""nvvm-intr-range"". 
The NVPTX callback function ""getNVPTXPassBuilderCallbacks"" can be defined as follows: void getNVPTXPassBuilderCallbacks(void *PassBuilder) { auto *PPBCB = static_cast(PassBuilder); PPBCB->registerCallback(""nvvm-reflect"", ®isterNVVMReflectPass); PPBCB->registerCallback(""nvvm-intr-range"", ®isterNVVMIntrRangePass); }" LLVM,NVPTX,412, LLVM,NVPTX,413,"void addIRPasses(Module *M, const TargetMachine *TM) { Triple TargetTriple(TM->getTargetTriple()); if (TargetTriple.getArch() == Triple::nvptx || TargetTriple.getArch() == Triple::nvptx64) { M->addPass(createNVPTXMemCpyOptPass()); M->addPass(createNVPTXLowerCtionalCFGPass()); M->addPass(createNVPTXLowerIVPass()); M->addPass(createNVPTXUnrollPass()); M->addPass(createNVPTXValueComparisonPropagationPass()); } }" LLVM,NVPTX,414,"class AddOptimizedRegAllocPass : public llvm::PassInfoMixin { public: AddOptimizedRegAllocPass() = default; llvm::PreservedAnalyses run(llvm::Function &F, llvm::FunctionAnalysisManager &FAM) { llvm::TargetMachine &TM = FAM.getResult(F); if (!TM.getTargetTriple().isNVPTX()) { return llvm::PreservedAnalyses::all(); }" LLVM,NVPTX,415,"bool createTargetRegisterAllocator(const Target &T, MachineFunction &MF, LiveIntervals &LIS) { if (T.getID() != TargetMachine::TM_NVPTX) { return false; } if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeNone)) { return false; } unsigned AllocNo = 0; MF.addPass(createNVPTXAssignRegistersPass(AllocNo)); LIS = getAnalysis(); LIS->computeRegUnitSets(); return true; }" LLVM,NVPTX,416," function GetTPC) { PMBuilder->addExtension(PassManagerBuilder::EP_EarlyAsPossible, [&] (const PassManagerBuilder &Builder, PassManagerBase &PM) { if (Builder.LibraryInfo) { PM.add(createTargetTransformInfoWrapperPass(GetTPC())); } PM.add(createCorrelatedValuePropagationPass()); return true; }); GetTPC()->addIRPass(createNVPTXExpandVCETestPass()); }" LLVM,NVPTX,417,"void addIRPasses(llvm::Module *M) { llvm::PassManagerBuilder PMBuilder; PMBuilder.OptLevel = 2; PMBuilder.populateModulePassManager(*M); llvm::Triple TT = M->getTargetTriple(); if (TT.getArch() == llvm::Triple::nvptx) { llvm::PassManagerBuilder::addNVPTXPasses(PMBuilder); M->setDataLayout(""e-i64:64-v16:16-n16:32:64""); M->addModuleFlag(llvm::Module::Warning, ""nvvm-reflect-list"", 1); } }" LLVM,NVPTX,418,"void addIRPasses(LLVMContext& C, PassManagerBase& PM, TargetMachine& TM) { NVPTXTargetMachine& PTM = static_cast(TM); PM.add(createTargetTransformInfoWrapperPass(PTM.getTargetIRAnalysis())); PM.add(createConstantMergePass()); PM.add(createCFGSimplificationPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createLowerSwitchPass()); PM.add(createNVPTXISelDag(PTM)); PM.add(createNVPTXRemoveBarriersPass()); PM.add(new NVPTXExtractAddrSpaceCast(PTM.getDataLayout())); PM.add(new NVPTXLowerAggrCopies()); PM.add(createNVPTXAssignValidGlobalIdsPass(PTM)); PM.add(new NVPTXFavorNonGenericAddrSpaces(PTM.getDataLayout())); PM.add(createNVPTXAssignComputeThreadIdsPass()); PM.add(new LowerPtrIntrinsics(PTM.getDataLayout())); PM.add(new LowerAtomicPass(PTM.getDataLayout())); PM.add(createNVPTXVectorLowering(PTM)); PM.add(new LowerFieldInits()); PM.add(createNVPTXLateLoweringPass()); PM.add(new NVPTXPropagateWideningArithmetic()); PM.add(new LowerStructArgs()); }" LLVM,NVPTX,419,"void addIRPasses(LLVMPassManager &PM, const TargetMachine &TM) { PM.add(createPromoteMemoryToRegisterPass()); PM.add(createInstructionCombiningPass()); PM.add(createCFGSimplificationPass()); PM.add(createJumpThreadingPass()); 
PM.add(createCFGSimplificationPass()); PM.add(createReassociatePass()); PM.add(createLoopRotatePass()); PM.add(createLICMPass()); PM.add(createLoopUnswitchPass(false, false)); PM.add(createLoopDeletionPass()); PM.add(createSimpleLoopUnrollPass()); PM.add(createLateCSEPass()); PM.add(createMemCpyOptPass()); PM.add(createGVNPass()); PM.add(createSCCPPass()); PM.add(createInstructionCombiningPass()); PM.add(createStripDeadPrototypesPass()); PM.add(createConstantPropagationPass()); if (TM.getTargetTriple().getArch() == Triple::nvptx || TM.getTargetTriple().getArch() == Triple::nvptx64 ) { PM.add(createNVPTXLowerAllocaPass()); PM.add(createNVPTXISelLoweringPass()); PM.add(createNVPTXPreAllocLoweringPass()); PM.add(createLowerInvokePass(TM)); PM.add(createTailCallEliminationPass()); PM.add(createNVPTXReplaceImageHandlesPass()); PM.add(createNVPTXInstrInfoPass()); PM.add(createNVPTXTargetTransformInfoPass()); } switch (TM.getTargetTriple().getArch()) { case Triple::nvptx: PM.add(createNVPTXISelDAGToDAGPass()); PM.add(createNVPTXFrameToArgsOffsetPass()); break; case Triple::nvptx64: PM.add(createNVPTX64ISelDAGToDAGPass()); PM.add(createNVPTX64FrameToArgsOffsetPass()); break; } }" LLVM,NVPTX,420, LLVM,NVPTX,421, LLVM,NVPTX,422,"void addIRPasses(TargetMachine &TM, PassManagerBase &PM) { Triple TargetTriple(TM.getTargetTriple()); bool is64Bit = TargetTriple.getArch() == Triple::nvptx64; PM.add(createNVPTXISelDag(TM, """", false)); PM.add(createNVPTXFloatingPointToNVVM(T)); if (TM.getOptLevel() > CodeGenOpt::None) { PM.add(createNVPTXFixIRBitwiseLogic()); PM.add(createNVPTXFixExecutionMode()); PM.add(createNVPTXLowerAlloca()); } if (TM.getOptLevel() > CodeGenOpt::Less) { PM.add(createAggressiveDCEPass()); PM.add(createCFGSimplificationPass()); PM.add(createLoopSimplifyPass()); PM.add(createInstSimplifierPass()); PM.add(createGVNPass()); } if (TM.getOptLevel() > CodeGenOpt::Aggressive) { PM.add(createSROAPass()); PM.add(createAggressiveDCEPass()); } PM.add(createNVPTXExternalSymbolPass()); PM.add(createNVPTXFixupSPIRLLVMDebugInfo()); PM.add(createNVPTXVectorLoweringPass()); PM.add(createNVPTXAssignValidGlobalNamesPass()); PM.add(createNVPTXAssignValidLocalNamesPass()); PM.add(createNVPTXConvertKernelToFunctionPass()); PM.add(createNVPTXAssignRegistersPass()); PM.add(createNVPTXInsertPTXVersionNPass()); PM.add(createNVPTXMemBarOptimizerPass()); PM.add(createNVPTXPeepholeOptPass()); PM.add(createNVPTXFavorNonGenericAddrSpacesPass()); PM.add(createNVPTXLowerAggrCopiesPass()); }" LLVM,NVPTX,423," addIRPasses(llvm::PassManagerBase *PM, Target) { switch (Target) { case NVPTX: break; default: llvm_unreachable(""Unsupported target for NVPTX backend""); } }" LLVM,NVPTX,424,"void addIRPasses(Module *M) { auto TM = M->getDataLayout().getTargetMachine(); TargetPassConfig *TPC = TM->createPassConfig(PM); TPC->addTargetPassesToEmitFile(PM, OS); PM.add(TPC); } " LLVM,NVPTX,425,"void addIRPasses(TargetMachine TM, PassManagerBase &PM) { PM.add(createSimpleInlinerPass()); PM.add(createGVNPass()); PM.add(createCFGSimplificationPass()); PM.add(createNVPTXLowerAllocaPass()); PM.add(createNVPTXISelDag(TM)); PM.add(createNVPTXCFGOptPass()); PM.add(createNVPTXAssignValidGlobalNamesPass()); PM.add(createNVVMReflectPass()); PM.add(createNVVMReflectOnVectorPass()); PM.add(createNVVMDWARFTransformPass()); PM.add(createNVPTXImageOptimizerPass()); }" LLVM,NVPTX,426, LLVM,NVPTX,427,"const DataLayout &getDataLayout(const Module &M) { const Triple &TT = M.getTargetTriple(); const Target *T = TargetRegistry::lookupTarget(TT.str()); 
assert(T && ""Unable to create target for module's target triple""); TargetOptions options; options.DataLayout = ""e-m:e-p:64:64:64-i64:64-v16:16-v32:32-n16:32:64""; options.MCOptions.AsmVerbose = true; std::string Error; const TargetMachine *TM = T->createTargetMachine(TT.str(), """", """", options, Reloc::PIC_, CodeModel::Default, CodeGenOpt::None, Error); return TM->createDataLayout(); }" LLVM,NVPTX,428,"if (!TM.isNVPTX()) { return nullptr; } Next, we create a TargetInstrInfo object specifically for NVPTX target using the createNVPTXInstrInfo function from the TargetInstrInfo class: TargetInstrInfo *TII = TM.getSubtargetImpl()->createNVPTXInstrInfo(); The createNVPTXInstrInfo function takes in the NVPTX target specific values and populates the TargetInstrInfo object with necessary information. We can then return this object after type casting it to the const pointer type: return static_cast(TII); The final getInstrInfo function will look something like this: const TargetInstrInfo *getInstrInfo(const TargetMachine &TM, NVPTXTargetLowering *TLI) { if (!TM.isNVPTX()) { return nullptr; } TargetInstrInfo *TII = TM.getSubtargetImpl()->createNVPTXInstrInfo(); return static_cast(TII); }" LLVM,NVPTX,429,TargetInstrInfoNVPTX& getRegisterInfo() { static TargetInstrInfoNVPTX NVPTXInstrInfo; return NVPTXInstrInfo; } LLVM,NVPTX,430,const TargetSubtargetInfo &getSubtargetImpl(const Class &target) { return target.NVPTX; } LLVM,NVPTX,431,"string getExplicitSectionGlobal(string Name, GlobalValue *D) { const Triple &T = TM.getTargetTriple(); switch (T.getArch()) { default: llvmUnreachable(""Unexpected target!""); case Triple::nvptx: { const GlobalVariable *GV = dyn_cast(D); if (!GV || GV->hasSection()) return StringRef(""""); if (Name == ""__cuda_libdevice_comat_table"") { return StringRef(""__nv_%cudart"", MAI->getFileFormatName()); } else { assert (Name.startswith(""__cuda_"")); return StringRef(""__nv_%s"", Name.substr(7)); } } } }" LLVM,NVPTX,432,"const char* getSectionForConstant(Constant* constant, SectionKind kind) { if (constant->getTargetSpecificValue() == ""NVPTX"") { if (kind == SectionKind::ReadOnly) { return "".nv.constant""; } else if (kind == SectionKind::Data) { return "".nv.data""; } else if (kind == SectionKind::Text) { return "".nv.exec""; } else { return nullptr; } } return nullptr; }" LLVM,NVPTX,433,"void Initialize() { DataLayout = ""e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64""; TargetTriple = ""nvptx-nvidia-cuda""; FPMath |= FFPazo | FFSet; Features.AddFeature(""+ptx42""); Features.AddFeature(""+ptx40""); AddrSpaces.AddAddressSpace(""shared"", 0); AddrSpaces.AddAddressSpace(""global"", 3); AddWriteTargetAttrsToEndOfFnAttrList(FastMath); }" LLVM,NVPTX,434, LLVM,NVPTX,435,"std::string getSectionForConstant(Constant *C, StringRef TargetSpecificType) { SectionKind Kind = C->getSectionKind(); if (TargetSpecificType == ""NVPTX"") { if (Kind.isText()) return ""nvvm.kernel""; else if (Kind.isBSS()) return ""nvvm.bss""; else if (Kind.isReadOnly()) return ""nvvm.const""; else return ""nvvm.global""; } return """"; }" LLVM,NVPTX,436, LLVM,NVPTX,437,"Section getSectionForConstant(Constant constant, SectionKind sectionKind) { switch (sectionKind) { case SectionKind::ReadOnly: return Section::ConstantReadOnly; case SectionKind::ReadWrite: return Section::ConstantReadWrite; case SectionKind::Merge: return Section::ConstantMerge; case SectionKind::ThreadLocal: return Section::ConstantThreadLocal; case 
SectionKind::SmallData: return Section::ConstantSmallData; default: assert(false && ""Unsupported section kind for constant!""); } }" LLVM,NVPTX,438,"void NVPTXTargetMachine::Initialize(const Target &T) { InitNVPTXLowering(T); Attributes = CreateTargetMachineAttributes( T, getTargetTriple() ); InstructionSelector = createNVPTXInstructionSelector(*this, getSubtargetImpl(), getCodeInfo()); CodeGenOpts.MCOptions.AsmVerbose = true; CodeGenOpts.MCOptions.MCUseDwarfDirectory = true; setSubtargetForCodeGen(*getSubtargetImpl()); GlobalISel.reset(createNVPTXGISel()); RegisterPassManagerBuilder buildForNVPTX(*this); addPassesToEmitFile(*PM, *MFM, nullptr, CODEGEN_FILETY, CodeGenOpts.EmitCodeView, CodeGenOpts.EmitCodeViewReloc); }" LLVM,NVPTX,439,"String getSectionForConstant(SectionKind Kind) { if (Kind.isThreadLocal() || Kind.isGlobal()) { return "".data""; } else if (Kind.isText()) { return "".text""; } else if (Kind.isReadOnly()) { return "".const""; } return """"; }" LLVM,NVPTX,440,"SectionKind getSectionForConstant(const Constant *C, const TargetMachine *TM) { assert(TM->getTargetTriple().isNVPTX() && ""This function is for NVPTX backend only!""); SectionKind Kind = C->getSectionKind(); const NVPTXTargetMachine *NVTM = static_cast(TM); const NVPTXSubtarget *ST = &NVTM->getSubtarget(); const NVPTXTargetLowering *TLI = ST->getTargetLowering(); const NVPTXTargetObjectFile *TLOF = ST->getTargetObjectFile(); if (Kind.isReadOnly()) { return TLOF->getReadOnlySection(); } else if (Kind.isReadWrite()) { return TLOF->getThreadLocalSection(); } else if (Kind.isText()) { return TLOF->getTextSection(); } else if (Kind.isData()) { return TLOF->getDataSection(); } return SectionKind::getMetadata(); }" LLVM,NVPTX,441,void Initialize() { NVPTX::InitGlobalISel(); NVPTX::InitLowerIncompatibleDefaultOperations(); NVPTX::InitGlobalAddressLowering(); NVPTX::InitFunctionLowering(); NVPTX::InitStackPointerAndFrameLowering(); NVPTX::InitInstrInfo(); NVPTX::InitTargetLowering(); NVPTX::InitRegisterInfo(); NVPTX::InitLowerThreadLocalMetadata(); NVPTX::InitTargetMachine(); NVPTX::InitPasses(); NVPTX::InitAsmPrinter(); NVPTX::InitAsmParser(); NVPTX::InitMIRParser(); NVPTX::InitMCCodeGenInfo(); NVPTX::InitMCInstrAnalysis(); } LLVM,NVPTX,442, LLVM,NVPTX,443,"Section *getSectionForConstant(Constant *C, SectionKind Kind) { LLVMContext &Cxt = C->getContext(); const TargetMachine &TM = getTargetMachine(); const Triple &TT = TM.getTargetTriple(); if (TT.getArch() == Triple::nvptx || TT.getArch() == Triple::nvptx64) { switch (Kind) { case SectionKind::Data: return Cxt.getOrCreateSection("".nv.data"", SectionKind::Data, 0, SectionFlags::Writeable); case SectionKind::ReadOnly: return Cxt.getOrCreateSection("".nv.constant"", SectionKind::ReadOnly, 0, SectionFlags::None); case SectionKind::BSS: return Cxt.getOrCreateSection("".nv.bss"", SectionKind::BSS, 0, SectionFlags::Writeable); case SectionKind::Text: return Cxt.getOrCreateSection("".nv.text"", SectionKind::Text, 0, SectionFlags::Executable); default: return nullptr; } } return Cxt.getOrCreateSection("""", Kind, 0, SectionFlags::None); }" LLVM,NVPTX,444,"void Initialize() { TargetOptions Options; Options.GPUArch = NVPTX; Options.UseFastMath = true; TargetMachine *TM = Triple(TheTriple).createTargetMachine(TheTriple, """", """", Options, None, None, CodeGenOpt::Aggressive); PassRegistry *Registry = PassRegistry::getPassRegistry(); initializeCore(*Registry); initializeCodeGen(*Registry); initializeLowerIntrinsicsPass(*Registry); initializePeepholeOptimizerPass(*Registry); 
initializeInstructionCombiningPass(*Registry); initializeScalarOpts(*Registry); initializeVectorization(*Registry); initializeIROptimization(*Registry); initializeLoopStrengthReducePass(*Registry); initializeUnreachableBlockElimPass(*Registry); initializeConstantHoistingPass(*Registry); initializeScalarizeMaskedMemIntrinPass(*Registry); initializeExpandMemCmpPass(*Registry); initializeCodeGenPreparePass(*Registry); initilaizeNvptxLowerAggrCopiesPass(*Registry); }" LLVM,NVPTX,445, LLVM,NVPTX,446,"Section getSectionForConstant(SectionKind sectionKind, const TargetMachine &TM, bool isFromGlobal) { if (sectionKind == TextSection) { return Section("".text"", SectionKind::Text); } else if (sectionKind == ReadOnlyDataSection) { return Section("".rodata"", SectionKind::ReadOnlyData); } if (isFromGlobal) { if (sectionKind == TextSection) { return Section("".global"", SectionKind::Data); } else if (sectionKind == ReadOnlyDataSection) { return Section("".global_const"", SectionKind::ReadOnly); } } else { if (sectionKind == TextSection) { return Section("".local"", SectionKind::Data); } else if (sectionKind == ReadOnlyDataSection) { return Section("".local_const"", SectionKind::ReadOnly); }" LLVM,NVPTX,447," SmallVectorImpl &Out) { if (TT.getArch() != Triple::nvptx) { return; } if (const auto *Attr = GV->getSection()) { StringRef Section = Attr->getString().drop_back(); Out.push_back('.'); Out.append(Section.begin(), Section.end()); Out.push_back('.'); } }" LLVM,NVPTX,448,"SectionKind getSectionForConstant(const Constant *C) { if (const GlobalValue *GV = dyn_cast(C)) { if (GV->hasSection() && GV->getSection() == "".nv_fatbin"") { return SectionKind::getMetadata(); } else if (GV->hasSection() && GV->getSection() == "".nv_constant"") { return SectionKind::getReadOnly(); } } else if (const MetadataAsValue *MD = dyn_cast(C)) { if (MD->hasMetadata() && MD->getMetadata()->getSection() == "".nv_fatbin"") { return SectionKind::getMetadata(); } } return SectionKind::getReadOnly(); }" LLVM,NVPTX,449,"void Initialize() { TargetRegistry::RegisterTarget(TheNVPTXTarget, ""nvptx"", ""NVPTX (NVIDIA PTX 32-bit)"", PTXTargetMachine::Create); } ``` This function registers the NVPTX target and sets it to be the default target. It also creates a target machine for NVPTX called PTXTargetMachine. Next, the function initializes the TargetOptions for NVPTX. This includes setting the target specific values: ``` class NVPTXTargetOptions { NVPTX::TargetFeatures Features; public: NVPTXTargetOptions() = default; const NVPTX::TargetFeatures &getFeatures() const { return Features; } NVPTX::TargetFeatures &getFeatures() { return Features; } }; ``` The TargetOptions class contains a field called Features, which is a struct that holds all the target specific values for NVPTX. In this case, it is of type NVPTX::TargetFeatures. Lastly, the function sets the target specific values for NVPTX. These values can be changed as needed for different targets. ``` void Initialize() { TargetRegistry::RegisterTarget(TheNVPTXTarget, ""nvptx"", ""NVPTX (NVIDIA PTX 32-bit)"", PTXTargetMachine::Create); NVPTXTargetOptions Options; Options.getFeatures().setFeature(NVPTX::Feature1, true); Options.getFeatures().setFeature(NVPTX::Feature2, false); ... 
Options.getFeatures().setFeature(NVPTX::FeatureN, true); setTargetOptions(Options); }" LLVM,NVPTX,450, LLVM,NVPTX,451, LLVM,NVPTX,452,"void emitRawBytes(std::vector Data) { for (int i = 0; i < Data.size(); i++) { outs() << format(""%0*X"", 2, Data[i]); } outs() << "",""; }" LLVM,NVPTX,453,"void outputDwarfFileDirectives(raw_ostream &stream, const MCAsmInfo &MAI, const MCSubtargetInfo &STI) { for (const std::string &File : MAI.getDwarfFileDirectivePrefix()) { stream << ""\t.file\t"" << File << '\n'; } stream << ""\t.target\tNVPTX,NVPTX\n""; }" LLVM,NVPTX,454,"void changeSection() { TargetStreamer *Streamer = static_cast(OutStreamer->getTargetStreamer()); if (!Streamer) return; Streamer->changeSection(""NVPTX"", ""NVPTX"", ""SubSection is not null!"", ""//\t}\n"", ""//\t.section"", ""//\t{\n""); }" LLVM,NVPTX,455,"void changeSection(NVPTX, SubSection, openingBracket, closingBracket) { streamer << NVPTX << "",""; if (SubSection != null) { streamer << NVPTX << "",""; } streamer << openingBracket << ""\n""; if (SubSection != null) { streamer << ""\t"" << SubSection << ""\n""; } streamer << closingBracket << ""\n""; }" LLVM,NVPTX,456,"void closeLastSection() { MachineFunction &MF = MF; if (MF.getTarget().getTargetTriple().getArch() != Triple::nvptx) { return; } std::string NVPTXClosingString(""\t}""); MachineBasicBlock *CloseBB = MF.CreateMachineBasicBlock(); MachineFunction::iterator endBB = MF.end(); if (endBB == MF.begin()) { return; } --endBB; MF.insert(endBB, CloseBB); MachineInstr *closingInst = MF.CreateMachineInstr(NVPTX::NVPTX, *CloseBB, MF.CreateTempVirtualRegister(MVT::Other)); closingInst->addOperand(MachineOperand::CreateImm(NVPTXClosingString)); CloseBB->insert(CloseBB->end(), closingInst); }" LLVM,NVPTX,457,"void changeSection() { MCStreamer &Streamer = getStreamer(); if (SubSection != NULL) { Streamer.EmitRawText(StringRef(""\t}\n"")); Streamer.EmitRawText(StringRef(""\t.section\t{\n"")); } }" LLVM,NVPTX,458,"void outputDwarfFileDirectives(StreamWriter& streamer) { streamer << "".file "" << ""NVPTX,NVPTX\n""; streamer.flush(); }" LLVM,NVPTX,459, LLVM,NVPTX,460,"void changeSection(NVPTX* nvptx, const char* section) { if (section == NULL) { printf(""Error: Section value cannot be null!\n""); return; } nvptx->Streamer->EmitBytes(""\t.section""); nvptx->Streamer->EmitBytes(section); nvptx->Streamer->EmitBytes(""\t{\n""); }" LLVM,NVPTX,461,"void closeLastSection() { const std::string NVPTX = ""NVPTX""; const std::string NVPTX_CLOSE = ""\t}""; outs() << NVPTX << NVPTX_CLOSE << ""\n""; }" LLVM,NVPTX,462,"void emitRawBytes(char *Data) { MachineBasicBlock &MBB = *MF->CreateMachineBasicBlock(MBB->getBasicBlock()); MachineFunction::iterator InsertPos = MBB.begin(); DebugLoc DL; MachineInstr *MI = BuildMI(MF, DL, TII->get(NVPTX::DATA), ""NvPTXData""); MI->addImm(NVPTX); MI->addImm(NVPTX); MI->addImm(40); MI->addImm(1); MI->addImm(1); MI->addImm(0); MI->addImm(128); MI->addImm(1); MI->addImm(1); MI->addImm("",""); MI->addMBB(NvPTXData); MBB.insert(InsertPos, MI); ++InsertPos; }" LLVM,NVPTX,463,"void outputDwarfFileDirectives(raw_ostream &OS, const TargetMachine &TM) { OS << '\n'; OS << "" .section .debug_info\n\n""; OS << "" .file 1""; OS << '""' << ""dummy_c_source"" << '""'; OS << '""' << '""' << '\n'; OS << '\n'; OS << '\n'; }" LLVM,NVPTX,464,"unsigned int getArithmeticInstrCost(unsigned Opcode, MVT VT, MVT ? ST, unsigned NumOps, bool preferConst, bool isFP) { } ``` The function will take in the following parameters: - *Opcode* (unsigned): The opcode of the instruction. 
- *VT* (MVT): The value type of the instruction. - *ST* (MVT ? ): The value type of the single result of the instruction. - *NumOps* (unsigned): The number of operands for the instruction. - *preferConst* (bool): A flag that indicates whether the instruction prefers constant operands or not. - *isFP* (bool): A flag that indicates whether the instruction is a floating-point operation or not. The function should return an unsigned integer, representing the approximate reciprocal throughput of the instruction. The calculation of the instruction cost can be done as follows: 1. Get the reciprocal throughput of the target architecture for the given instruction and value type. This can be obtained dynamically from the backend's target-specific properties. 2. If the given instruction is a floating-point operation (isFP is true), multiply the reciprocal throughput by 2 as floating-point operations are typically slower on NVPTX backend. 3. Adjust the throughput based on the number of operands and preferred operand type. If the instruction prefers constant operands (preferConst is true), the throughput should be increased by 1. If the instruction has more than 2 operands (NumOps > 2), the throughput should be divided by 2. 4. Return the adjusted calculation as the instruction cost. The final implementation for the function will look as follows: ``` unsigned int getArithmeticInstrCost(unsigned Opcode, MVT VT, MVT ? ST, unsigned NumOps, bool preferConst, bool isFP) { unsigned int throughput = getTargetRecipThruput(Opcode, VT); if (isFP) { throughput *= 2; } if (preferConst) { throughput += 1; } if (NumOps > 2) { throughput /= 2; } return throughput; }" LLVM,NVPTX,465,llvm::TargetLowering* getTLI(llvm::TargetMachine* TM) { return TM->getTargetLowering(); } LLVM,NVPTX,466,static TargetTransformInfo::UnrollingPreferences getUnrollingPreferences() { TargetTransformInfo::UnrollingPreferences UP; UP.AllowsUnroll = true; UP.Interleave = true; UP.Vectorize = false; UP.Partial = TargetTransformInfo::UnrollingPreferences::UF_NVPTX; UP.Runtime = TargetTransformInfo::UnrollingPreferences::UR_NVPTX; UP.Vector = 4; UP.FavorSmallSize = true; return UP; } LLVM,NVPTX,467,bool hasBranchDivergence(const MachineFunction &MF) { const TargetSubtargetInfo &ST = MF.getSubtarget(); if (!ST.isNVPTX()) return false; for (const MachineBasicBlock &MBB : MF) { const MachineInstr *BranchInst = MBB.getFirstTerminator(); if (BranchInst && BranchInst->isConditionalBranch()) { const MachineOperand &Op1 = BranchInst->getOperand(0); const MachineOperand &Op2 = BranchInst->getOperand(1); if (Op1.getReg() != Op2.getReg()) { return true; } } } return false; } LLVM,NVPTX,468,"bool isSourceOfDivergence(Value *V) { if (!V->getType()->isPointerTy()) return false; Type *Ty = V->getType()->getPointerElementType(); if (!Ty->isVectorTy()) return false; unsigned VecSize = Ty->getVectorNumElements(); if (VecSize == 1) return false; const TargetLibraryInfo *TLI = &getAnalysis().getTLI(); TargetSpecificInfo TSInfo(TLI); if (TSInfo.getScalarTargetSpecificValue(V) != ""NVPTX"") return false; Value *Mask = TSInfo.getVectorMask(V); if (Mask != nullptr) return true; return false; }" LLVM,NVPTX,469,"InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *ScalarTy, Type *VectorTy, const TargetRegisterInfo *TRI) { assert(ScalarTy && ""Scalar type cannot be null.""); assert(VectorTy && ""Vector type cannot be null.""); if (TargetTransformInfo::TCC::getValue() != NVPTX || (TargetTransformInfo::TCC::getValue() == NVPTX && Opcode == ISD::ADD || Opcode == 
ISD::MUL || Opcode == ISD::XOR || Opcode == ISD::OR || Opcode == ISD::AND) || ScalarTy == MVT::i64 || VectorTy == MVT::i64 || cast(VectorTy)->getNumElements() == 2) { int Cost = 0; switch(Opcode) { case ISD::ADD: Cost = 1; break; case ISD::MUL: Cost = 2; break; case ISD::XOR: case ISD::OR: case ISD::AND: Cost = 1; break; default: llvm_unreachable(""This instruction is not supported on NVPTX backend.""); } return InstructionCost::getThroughputCost(Cost); } return InstructionCost::getCost(1); }" LLVM,NVPTX,470,"struct PeelingPrefs getPeelingPreferences(NVPTX, LVPTX) { struct PeelingPrefs preferences; preferences.peelingFactor = 2; return preferences; }" LLVM,NVPTX,471,"llvm::UnrollingPreferences getUnrollingPreferences(const llvm::TargetOptions &Options, StringRef TargetCPU, StringRef TargetFeature, unsigned UnrollingCount) { llvm::UnrollingPreferences prefs; prefs.Count = UnrollingCount; if (Options.hasAtomics()) { prefs.TargetUnrollPreferences = llvm::TargetTransformInfo::UnrollingPreferences::UP_NVPTX; } return prefs; }" LLVM,NVPTX,472,"Function* instCombineIntrinsic(IntrinsicInst* inst, LLVMContext& ctx, IRBuilder<>& builder) { Value* nvptx = builder.getInt32(NVPTX); Value* nvptx = builder.getInt32(NVPTX); switch(nvptx) { case NVPTX: { Value* add = builder.CreateAdd(nvptx, nvptx); add->setOperand(0, inst->getArgOperand(0)); return add; }" LLVM,NVPTX,473,"unsigned int getArithmeticInstrCost(unsigned int Opcode, Type *Ty, TargetCostKind CostKind, InstructionCost &Cost){ if (CostKind != TargetCostKind::RecipThroughput){ return 1; } if (Ty != MVT::i64){ return 1; } switch (Opcode) { case ISD::ADD: Cost = 2; break; case ISD::MUL: Cost = 2; break; case ISD::XOR: Cost = 2; break; case ISD::OR: Cost = 2; break; case ISD::AND: Cost = 2; break; default: Cost = 0; break; } return Cost * 2; }" LLVM,NVPTX,474,"unsigned int getArithmeticInstrCost(const TargetTransformInfo& TTI, TTI::TargetCostKind CostKind, Type* Ty1, Type* Ty2) { if (TTI.getSubtarget().getTargetTriple().getArch() != Triple::nvptx) return 0; if (CostKind != TTI::TCK_RecipThroughput) return 0; if (Ty1->getPrimitiveSizeInBits() != 64 || Ty2->getPrimitiveSizeInBits() != 64) return 0; unsigned int Cost = TTI.getIntrinsicCost (Intrinsic::nvptx_add_i64, Ty1) + TTI.getIntrinsicCost (Intrinsic::nvptx_mul_i64, Ty1) + TTI.getIntrinsicCost (Intrinsic::nvptx_xor_i64, Ty1) + TTI.getIntrinsicCost (Intrinsic::nvptx_or_i64, Ty1) + TTI.getIntrinsicCost (Intrinsic::nvptx_and_i64, Ty1); return Cost; }" LLVM,NVPTX,475, LLVM,NVPTX,476,bool canHaveNonUndefGlobalInitializerInAddressSpace(uint32_t AddressSpace) { return AddressSpace == 0; } LLVM,NVPTX,477, LLVM,NVPTX,478,void getAnalysisUsage(AnalysisUsage &AU) const override { MachineFunctionPass::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); } LLVM,NVPTX,479,"unsigned getArithmeticInstrCost(TargetOpcode Opcode, MVT VT) { switch (Opcode) { case NVPTX::ADD: case NVPTX::MUL: case NVPTX::XOR: case NVPTX::OR: case NVPTX::AND: if (VT == MVT::i64) { return 2; } break; } return 1; }" LLVM,NVPTX,480,bool hasBranchDivergence(const llvm::Function *F) { const llvm::TargetMachine *TM = F->getParent()->getTargetMachine(); const llvm::TargetSubtargetInfo *STI = TM->getSubtargetImpl(F); const llvm::NVPTXSubtarget *ST = static_cast(STI); return ST->getGenLoops() && ST->getCapBranchDivergence(); } LLVM,NVPTX,481,llvm::ImmutablePass *initializePass() { llvm::Value *nvptxValue = return new llvm::ImmutablePass(nvptxValue); } LLVM,NVPTX,482,"int 
getArithmeticInstrCost(NVPTXTargetLowerInfo &TLInfo, TTI::InstructionRemark &Remark, TargetTransformInfo::TargetCostKind CostKind, EVT VT, unsigned NumOperands) { if(!TLInfo.isArithmeticInstr(VT) || !TLInfo.isLegalArithType(VT)) return -1; if(VT == MVT::i64) { switch(NumOperands) { case 2: return 14; case 1: return 14; default: return -1; } } else { return -1; } }" LLVM,NVPTX,483, LLVM,NVPTX,484,"unsigned int getArithmeticInstrCost(OpCode op, ValueType type, int numOps, int latency) { if (op != ISD::ADD && op != ISD::MUL && op != ISD::XOR && op != ISD::OR && op != ISD::AND) { return 0; } if (type != MVT::i64) { return 0; } if (numOps != 2) { return 0; } unsigned int throughput = 0; switch (op) { case ISD::ADD: case ISD::MUL: throughput = 1; break; case ISD::XOR: case ISD::OR: case ISD::AND: throughput = 0.5; break; } unsigned int cost = throughput * latency; return cost; }" LLVM,NVPTX,485, LLVM,NVPTX,486,"int getNumberOfRegisters() { llvm::TargetMachine *targetMachine = llvm::EngineBuilder().selectTarget(); if (!targetMachine || targetMachine->getTargetTriple().getArch() != llvm::Triple::nvptx) { return 0; } const llvm::TargetSubtargetInfo *subtargetInfo = targetMachine->getSubtargetImpl(llvm::StringRef(""nvptx"")); int numRegs = subtargetInfo->getRegInfo().getNumRegs(); return numRegs; }" LLVM,NVPTX,487,"llvm::UnrollingPreferences getUnrollingPreferences() { llvm::UnrollingPreferences preferences; preferences.setTargetUnrollingPreferences(""NVPTX"", 4); return preferences; }" LLVM,NVPTX,488,bool hasVolatileVariant(Instruction *I) { if (I->mayReadOrWriteMemory()) { Triple Target = I->getModule()->getTargetTriple(); if (Target.getArch() == Triple::nvptx) { Instruction *VI = I->getVolatileVariant(); if (VI != nullptr) { return true; } } } return false; } LLVM,NVPTX,489,"unsigned int getAlign(Instruction* inst) { if (inst->isLoad() || inst->isStore()) { Value* ptrOperand = inst->getPointerOperand(); if (PointerType* ptrType = dyn_cast(ptrOperand->getType())) { unsigned int ptrAlign = ptrType->getAlignment(); if (ptrAlign != 0) { return min(ptrAlign, 16); } else { return 16; } } } return 16; }" LLVM,NVPTX,490," unsigned MaxKnownAlignment, unsigned DefaultAssumedAlignment) { if (TargetTriple == ""NVPTX"") { if (Name == ""callalign"") { return MinKnownAlignment; } } return DefaultAssumedAlignment; }" LLVM,NVPTX,491,"Instruction* getInst(Instruction* instruction) { if(instruction->getTargetSpecificValue() == ""NVPTX,0,0,0,0"") { return instruction->getDependentInst(); } else { return nullptr; } }" LLVM,NVPTX,492, LLVM,NVPTX,493,"unsigned getAlign(const Value *V) { if (const AlignedAttr *AA = V->getFnAttr(""align"")) { unsigned Align = AA->getAlignment(); if (Align > 0) { return Align; } } unsigned TargetAlign = V->getFnAttr(""nvptx"")->getValueAsInt(); return TargetAlign & TargetAlignMask; }" LLVM,NVPTX,494,"int getAlign(void* base, int offset, char* dereference) { if (strcmp(dereference, ""load"") == 0 || strcmp(dereference, ""store"") == 0) { return 16; } else { int addr = (int)base + offset; return addr & 16; } }" LLVM,NVPTX,495,unsigned getAlign() { unsigned callalign = NVPTX_CALLALIGN; unsigned minalign = NVPTX_MINALIGN; unsigned maxalign = NVPTX_MAXALIGN; if (callalign != 0) { return callalign; } if (minalign >= maxalign || minalign < NVPTX_DEFAULT_ALIGN) { return NVPTX_DEFAULT_ALIGN; } return minalign; } LLVM,NVPTX,496,Instruction* getInst(Dependency* dep) { Function* F = dep->getUser()->getParent(); if (F->getCallingConv() == CallingConv::NVPTX) { Value* depVal = dep->getDepValue(); if 
(depVal->getValueID() == Value::TargetSpecificConstant && depVal->getOperand(0) == (Value*) CallingConv::NVPTX) { ConstantInt* instID = cast(depVal->getOperand(1)); Instruction* inst = dyn_cast(F->getInstruction(instID->getZExtValue())); return inst; } } return nullptr; } LLVM,NVPTX,497,"namespace { #define NVPTX ""nvptx"" class NVPTXPass : public FunctionPass { public: static char ID; NVPTXPass() : FunctionPass(ID) {} bool runOnFunction(Function &F) override { auto &FAM = getAnalysisManager(); auto *PDT = &FAM.getResult(F); PDT->printAsDOT(F.getParent()->getModuleIdentifier() + ""."" + F.getName(), dbgs()); return false; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.setPreservesAll(); AU.addRequired(); AU.addRequired(); }" LLVM,NVPTX,498,bool runOnFunction(Function &F) override { const Target *target = F.getParent()->getTarget(); if (!target || target->getTargetTriple().isNVPTX()) return false; return true; } LLVM,NVPTX,499, LLVM,NVPTX,500, LLVM,NVPTX,501,"void runOnFunction(Function &F) override { if (F.getName() == ""_reflect"") { if (!F.isDeclaration()) { report_fatal_error(""_reflect function should not have a body""); } if (!F.getReturnType()->isIntegerTy()) { report_fatal_error(""_reflect's return type should be integer""); } if (F.arg_size() != 4) { report_fatal_error(""Wrong number of operands to _reflect function""); } CallInst *CI = dyn_cast(&F.getEntryBlock().front()); if (!CI || CI->getCalledFunction()->getIntrinsicID() != Intrinsic::nvvm_reflect) { report_fatal_error(""Format of _reflect function not recognized""); } if (!CI->getOperand(0)->isNullValue() || !CI->getOperand(1)->isNullValue()) { report_fatal_error(""Format of _reflect function not recognized""); } if (isa(CI->getOperand(2)) && cast(CI->getOperand(2))->getZExtValue()) { report_fatal_error(""Format of _reflect function not recognized""); } if (isa(CI->getOperand(3))) { report_fatal_error(""Format of _reflect function not recognized""); } int i = 1; for (auto &arg : F.args()) { errs() << ""Arg "" << i++ << "" of _reflect: "" << arg.getName() << ""\n""; } F.addFnAttr(""__CUDA_FTZ""); F.addFnAttr(""nvvm-reflect-ftz""); } }" LLVM,NVPTX,502,"virtual bool runOnModule(Module &M) { if (!M.getTargetTriple().startswith(""nvptx"")) { return false; } for (Function &F : M.getFunctionList()) { if (F.hasFnAttribute(""_reflect"")) { if (F.isDeclaration()) { report_fatal_error(""_reflect function should have a body""); } if (!F.getReturnType()->isIntegerTy()) { report_fatal_error(""_reflect's return type should be integer""); } for (Instruction &I : instructions(&F)) { if (CallInst *CI = dyn_cast(&I)) { Function *CalledFunc = CI->getCalledFunction(); if (CalledFunc && CalledFunc->hasFnAttribute(""_reflect"")) { if (CI->getNumOperands() != 1) { report_fatal_error(""Only one operand expected for _reflect function""); } Value *Param = CI->getOperand(0); if (!isa(Param)) { report_fatal_error(""Expected a const-to-gen conversion""); }" LLVM,NVPTX,503,void getAnalysisUsage(AnalysisUsage &au) const override { au.addRequired(); au.addPreserved(); au.setPreservesCFG(); } LLVM,NVPTX,504,DominatorTree* run(Function& F) { TargetMachine* TM = get NVPTXTargetMachine(); LLVMContext& C = F.getContext(); LegacyPassManager PM; PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis())); DominatorTree* DT = new DominatorTree(); DT->recalculate(F); PM.add(createDomTreePrinterPass()); PM.run(F); return DT; } LLVM,NVPTX,505,"void NVPTXTargetMachine::runOnFunction(Function &F) { NVPTXTargetLowering &TLI = 
*getSubtargetImpl(F)->getTargetLowering(); TLI.runOnFunction(F); } void NVPTXTargetLowering::runOnFunction(Function &F) { AttributeSet attrs = F.getAttributes(); attrs.addAttribute(F.getContext(), ""nvvmir"", ""1.0""); F.setAttributes(attrs); }" LLVM,NVPTX,506,"bool runOnFunction(Function &Func, TargetLibraryInfo &TLI) { Triple TargetTriple(Func.getParent()->getTargetTriple()); if (TargetTriple.getArch() != Triple::nvptx || !(TargetTriple.getVendor() == Triple::NVIDIA && TargetTriple.getOS() == Triple::Cuda)) { return false; }" LLVM,NVPTX,507, LLVM,NVPTX,508,"void runOnModule(LLVM::Module &M) { for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { if (CallInst *CI = dyn_cast(I)) { Function *Callee = CI->getCalledFunction(); if (Callee && Callee->getName() == ""_reflect"") { if (CI->getNumOperands() != 1) { reportError(""Only one operand expected for _reflect function""); } Value *Arg = CI->getOperand(0); if (!isa(Arg)) { reportError(""Expected a const-to-gen conversion for _reflect function""); } if (CI->getType()->isIntegerTy()) { } else { reportError(""_reflect's return type should be integer""); } } else { reportError(""Only a call instruction can use _reflect""); } } } } } }" LLVM,NVPTX,509, LLVM,NVPTX,510, LLVM,NVPTX,511,void NVPTXTargetMachine::getAnalysisUsage(AnalysisUsage &AU) const { TargetMachine::getAnalysisUsage(AU); AU.addRequired(); } LLVM,NVPTX,512,"void runOnModule(Module& M) override { Type* Int32Ty = Type::getInt32Ty(M.getContext()); Function* Func = M.getFunction(""myFunction""); BasicBlock* BB = &Func->getEntryBlock(); Instruction* InsertionPt = &*BB->getFirstInsertionPt(); Value* NewInst = BinaryOperator::Create(Instruction::Add, ConstantInt::get(Int32Ty, 5), ConstantInt::get(Int32Ty, 10), ""newInst""); InsertionPt->insertBefore(NewInst); }" LLVM,NVPTX,513,"std::string getPassName(const void *Pass) { if (Pass == &llvm::NVPTX::ConvertVectorTypesPass::ID) return ""NVPTX Convert Vector Types""; else return """"; }" LLVM,NVPTX,514, LLVM,ARC,0,"void EmitInstruction(Instruction *instr) { AdvanceHazardState(); if (auto *CI = dyn_cast(instr)) { if (CI->getCalledFunction()->getName() == ""ARC"" && CI->getNumOperands() == 3) { Value *arc = CI->getOperand(0); Value *arc2 = CI->getOperand(1); Value *128 = CI->getOperand(2); DebugValueInst *dbgInst = DebugValueInst::Create(arc, arc2, 128, ""Should be handled target independently""); dbgInst->insertAfter(instr); } } }" LLVM,ARC,1,void runOnMachineFunction(MachineFunction &MF) { TargetMachine &TM = MF.getTarget(); TM.setARCTargetSpecificValue(4); MF.emitARCBodyInstructions(); TM.setARCTargetSpecificValue(ARC); } LLVM,ARC,2,"void emitInstruction(ARCMachineFunction &MF, MCInst &Inst) { ARCODMCInst &ARCInst = static_cast(Inst); switch (ARCInst.getOpcode()) { case ARC::DBG_VALUE:{ MF.emitInstruction(ARCInst.getOpcode(), ARCInst.getOperands(), ARC::TARGET); break; } default: llvm_unreachable(""Unexpected instruction type!""); } }" LLVM,ARC,3,"string getPassName() { return ""ARC Assembly Printer""; }" LLVM,ARC,4,"void runOnMachineFunction(MachineFunction &MF) { ARCTargetMachine &TM = static_cast(MF.getTarget()); unsigned int TargetSpecificValue = TM.getTargetSpecificValue(); ARCInstrInfo &TII = *TM.getInstrInfo(); MachineBasicBlock *MBB = MF.CreateMachineBasicBlock(); MF.push_back(MBB); MachineBasicBlock::iterator I = MBB->begin(); BuildMI(*MBB, I, DebugLoc(), 
TII.get(ARC::ARC_ADD_imm), ARC::ARC) .addReg(ARC::ARC).addImm(TargetSpecificValue); BuildMI(*MBB, std::next(I), DebugLoc(), TII.get(ARC::ARC_RET)); }" LLVM,ARC,5,"void runOnMachineFunction(MachineFunction &MF) { ARCFunctionInfo *AFI = MF.getInfo(); EmitARCBranchFinalize(MF); dbgs() << ""Running ARC Branch Finalize on "" << MF.getName().str() << ""\n""; dbgs() << ""Estimated function size for "" << MF.getName().str() << "": ""; if (!AFI) { dbgs() << ""Unknown (or size 0) size for: ""; } else { dbgs() << AFI->getSize() << "": ""; } dbgs() << MF.getName().str() << ""\n""; generateMachineCode(MF); }" LLVM,ARC,6," MachineFrameInfo& MFI, StackMapLayout& SM, TargetLoweringBase* TLI) { MF->runFunctionBegin(); MF->runARCFinalize(); for (auto& BB : MF->getBasicBlocks()) { emitBasicBlock(BB, MBB, MFI, SM, TLI); BB->runOnMachineFunction(); }" LLVM,ARC,7,"std::string getPassName() { return ""ARC Branch Finalization Pass""; }" LLVM,ARC,8,"void runOnMachineFunction(const MachineFunction &MF){ O << ""Running ARC Branch Finalize on "" << MF.getName() << ""\n""; O << ""#define ARC 1"" << "" // Set ARC specific value\n""; O << ""#define ARC 0"" << ""\n""; O << ""Running ARC Branch Finalize on "" << ""\n""; O << ""#define ARC 0"" << ""\n""; unsigned int FuncSize = getFunctionSize(MF); O << ""Unknown (or size 0) size for: "" << MF.getName() << ""\n""; O << ""Estimated function size for "" << MF.getName() << "": \n""; O << ""\n""; EmitARCSpecificInstructions(MF); FuncSize += getTargetSpecificValue(); O << ""Estimated function size for "" << MF.getName() << "": "" << FuncSize << ""\n""; }" LLVM,ARC,9,"std::string getInstruction(unsigned char *bytes, int size) { LLVMInitializeARCTarget(); LLVMInitializeARCTargetInfo(); LLVMInitializeARCTargetMC(); LLVMInitializeARCMCAsmInfo(); LLVMInitializeARCMCCodeGenInfo(); LLVMInitializeARCMCInstrInfo(); LLVMInitializeARCMCLowerInstrInfo(); std::string error; Triple TheTriple(Triple::normalize("""")); TargetOptions options; std::unique_ptr target( TargetRegistry::lookupTarget("""", TheTriple, error)); if (!target) { return ""Error setting up target machine.""; } MCContext *mcc = new MCContext(); MCDisassembler *disasm = target->createMCDisassembler(TheTriple, *mcc); std::unique_ptr MCII(target->createMCInstrInfo()); std::unique_ptr InstPrinter( target->createMCInstPrinter(TheTriple, 0, *MCII, options)); std::unique_ptr ElfStreamer( target->createMCObjectStreamer( TheTriple, getContext(), *MCII, *SmallDataStreamer, **mcc, codeModel::Default, False, False, nullptr) ); MCStreamer *mcs = new MCStreamer(); mcs->InitSections(); SmallVector codevec; for (int i = 0; i < size; i++) { codevec.push_back(bytes[i]); }" LLVM,ARC,10, LLVM,ARC,11, LLVM,ARC,12," int imm, int size, int offset, int line) { string instruction = """"; if(size == 64) { instruction = decodeInstruction64(opcode, reg1, reg2, reg3, imm); if(instruction != """") { instruction += "", Successfully decoded "" + to_string(size) + ""-bit instruction."" + (line != -1 ? "" Line: "" + to_string(line) : """"); } else { instruction = decodeInstruction32(opcode, reg1, reg2, reg3, imm); if(instruction != """") { instruction += "", Not a "" + to_string(size) + ""-bit instruction, falling back to 32-bit.""; } } } else if(size == 16) { if(reg2 == 1) { instruction = decodeInstruction16(opcode, reg1, reg3, imm); if(instruction != """") { instruction += "", Successfully decoded "" + to_string(size) + ""-bit instruction with limm."" + (line != -1 ? 
"" Line: "" + to_string(line) : """"); } else { instruction = decodeInstruction16(opcode, reg1, reg3, 0); if(instruction != """") { instruction += "", Not a "" + to_string(size) + ""-bit instruction with limm, try without it.""; } } }" LLVM,ARC,13, LLVM,ARC,14, LLVM,ARC,15, LLVM,ARC,16, LLVM,ARC,17,"void runOnMachineFunction(MachineFunction &MF) { const TargetMachine &TM = MF.getTarget(); const TargetInstrInfo &TII = *TM.getInstrInfo(); const TargetRegisterInfo &TRI = *TM.getRegisterInfo(); for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) { MachineBasicBlock &MBB = *I; for (MachineBasicBlock::iterator MI = MBB.begin(), ME = MBB.end(); MI != ME; ++MI) { MachineInstr &Inst = *MI; switch (Inst.getOpcode()) { case ARC::ST_FAR: case ARC::STH_FAR: case ARC::STB_FAR: Register TargetReg = Inst.getOperand(0).getReg(); Register StackPtrReg = TRI.getStackRegister(); BuildMI(MBB, MI, DebugLoc(), TII.get(ARC::LD)) .addReg(TargetReg, RegState::Define) .addReg(StackPtrReg); int Offset = Inst.getOperand(1).getImm(); BuildMI(MBB, MI, DebugLoc(), TII.get(ARC::ST)) .addReg(TargetReg, RegState::Kill) .addReg(StackPtrReg) .addImm(Offset); MI->eraseFromParent(); break; default: break; } } } }" LLVM,ARC,18,"bool determineCalleeSaves() { const MCPhysReg *CalleeSavedRegs = ARC::BLINK; unsigned NumCalleeSavedRegs = sizeof(CalleeSavedRegs)/sizeof(MCPhysReg); for (unsigned i = 0; i < NumCalleeSavedRegs; i++) { MCPhysReg CalleeSavedReg = CalleeSavedRegs[i]; if (CalleeSavedReg == ARC::BLINK) { TargetRegisterInfo::getReservedRegs(MF).addReg(CalleeSavedReg); } else { bool IsLiveIn = false; for (MachineFunction::livein_iterator I = MF.livein_begin(), E = MF.livein_end(); I != E; ++I) { if (I->first == CalleeSavedReg) { IsLiveIn = true; break; } } if (!IsLiveIn) { TargetRegisterInfo::getReservedRegs(MF).addReg(CalleeSavedReg); } } } return true; }" LLVM,ARC,19,"void eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB) { const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); for (auto I = MBB.instr_begin(), E = MBB.instr_end(); I != E;) { MachineInstr &MI = *I; if (MI.getOpcode() == ARC::ADJCALLSTACKDOWN || MI.getOpcode() == ARC::ADJCALLSTACKUP) { const MCInstrDesc &MCID = MI.getDesc(); if (MCID.isPseudo()) { if (TII->useCallFramePseudoInstructions(MF)) { LLVMContext &Ctx = MF.getFunction().getContext(); MachineInstr *NewMI = nullptr; if (MI.getOpcode() == ARC::ADJCALLSTACKDOWN) { NewMI = BuildMI(MF, MI.getDebugLoc(), TII->get(ARC::ADJCALLSTACKDOWN), MI.getOperand(0).getReg(), MI.getOperand(1).getImm()); } else { NewMI = BuildMI(MF, MI.getDebugLoc(), TII->get(ARC::ADJCALLSTACKUP), MI.getOperand(0).getReg(), MI.getOperand(1).getImm()); } MBB.insertAfter(MI, NewMI); MI.eraseFromParent(); I = NewMI->getIterator(); continue; } } } ++I; } }" LLVM,ARC,20,"void processFunctionBeforeFrameFinalized(MachineFunction &MF){ MachineFrameInfo &MFI = MF.getFrameInfo(); size_t stackSize = MFI.getStackSize(); outs() << ""Process function before frame finalized:\n""; outs() << ""Current stack size: "" << stackSize << ""\n""; const TargetRegisterClass *GPR32RC = MF.getRegInfo().getRegClass(ARC::GPR32RegClass); unsigned RegScavFI = MFI.CreateStackObject(GPR32RC->getSize(), GPR32RC->getAlignment(), false); outs() << ""Created scavenging index RegScavFI="" << RegScavFI << ""\n""; }" LLVM,ARC,21,"bool restoreCalleeSavedRegisters(MachineInstr &MI, MachineRegisterInfo &MRI) { bool isProfitable = true; const MCPhysReg *CalleeSavedRegs = MRI.getCalleeSavedRegs(); int Size = MRI.getNumVirtRegs(); 
for (int i = 0; i < Size; i++) { if (MRI.isAllocatable(CalleeSavedRegs[i])) { int StackSlot = MRI.getStackSlot(CalleeSavedRegs[i]); MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARC::LDWntausp), CalleeSavedRegs[i]); MIB.addFrameIndex(StackSlot); MI.getParent()->insert(MI, MIB); isProfitable = false; } } if (isProfitable) return true; else { MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARC::MOVi), ARC::R14).addImm(ARC::R14); MI.getParent()->insert(MI, MIB); return false; } }" LLVM,ARC,22, LLVM,ARC,23,"bool spillCalleeSavedRegisters(MachineFunction &MF) { const ARCSubtarget &Subtarget = MF.getSubtarget(); const ARCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); MachineRegisterInfo &MRI = MF.getRegInfo(); bool HasCalleeSavedRegs = false; for (unsigned Reg : RegInfo->getCalleeSavedRegs(MF)) { if (RegInfo->isCalleeSavedReg(MF, Reg)) { HasCalleeSavedRegs = true; break; } } if (!HasCalleeSavedRegs) return false; const TargetInstrInfo *TII = Subtarget.getInstrInfo(); for (unsigned Reg : RegInfo->getCalleeSavedRegs(MF)) { if (RegInfo->isCalleeSavedReg(MF, Reg)) { int FrameIdx = MFI.CreateStackObject(RegInfo->getSpillSize(MF, Reg), RegInfo->getSpillAlign(MF, Reg)); MRI.setSimpleHint(Reg, FrameIdx); int64_t sp = ARC::R14; BuildMI(MF, DebugLoc(), TII->get(ARC::MOV_GPRetImm), sp).addImm(FrameIdx); } } return true; }" LLVM,ARC,24, LLVM,ARC,25,"bool restoreCalleeSavedRegisters() { int targetVal = ARC::R14; if (targetVal * calleeSavedRegisterSize > callerSavedRegisterSize) return true; for (int i = 0; i < calleeSavedRegisterSize; ++i) { loadRegToStackSlot(targetVal, i); ++targetVal; } return false; }" LLVM,ARC,26, LLVM,ARC,27,"void determineCalleeSaves(MachineFunction &MF, TargetRegisterInfo &TRI) { const MCPhysReg *CalleeSavedRegs = TRI.getCalleeSavedRegs(&MF); unsigned NumCalleeSavedRegs = TRI.getNumCalleeSavedRegs(&MF); std::set CalleeSavedSet; for (unsigned i = 0; i < NumCalleeSavedRegs; ++i) { MCPhysReg CalleeSavedReg = CalleeSavedRegs[i]; if (CalleeSavedReg == ARC::BLINK) { CalleeSavedSet.insert(CalleeSavedReg); } else { const TargetRegisterClass *RC = TRI.getRegClass(CalleeSavedReg); unsigned SpillSize = TRI.getSpillSize(*RC); int CSR = TRI.getCallPreservedMask(&MF)[i]; if (CSR) { CalleeSavedSet.insert(CalleeSavedReg); } } }" LLVM,ARC,28,"void eliminateCallFramePseudoInstr(Instruction *Instr, ARCMachineFunctionInfo *MFI, MachineBasicBlock *MBB) { if (Instr->isCallFramePseudo()) { ARCCCState CCInfo; CCInfo.AnalyzeCallOperands(Chain, CC_ARVL, Ins, nullptr, nullptr); int CallFramePseudo = CCInfo.ARCallFramePseudo; if (CallFramePseudo != 0) { MBB->erase(Instr); CallFramePseudo += 2 * sizeof(int); Instr->addOperand( MachineOperand::CreateImm(CallFramePseudo)); MBB->insert(Instr); } } }" LLVM,ARC,29,"void processFunctionBeforeFrameFinalized(MachineFunction &MF, const TargetRegisterInfo *TRI) { MachineFrameInfo &MFI = MF.getFrameInfo(); dbgs() << ""Process function before frame finalized: \n""; dbgs() << ""Current stack size: "" << MFI.getLocalFrameSize() << ""\n""; int RegScavFI = MFI.CreateStackObject(TRI->getSpillSize(ARC::GPR32RegClass), TRI->getSpillAlignment(ARC::GPR32RegClass), false); dbgs() << ""Created scavenging index RegScavFI="" << RegScavFI << ""\n""; return; }" LLVM,ARC,30,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { } Function definition: bool restoreCalleeSavedRegisters(MachineFunction &MF) { MachineBasicBlock &MBB = *MF.begin(); unsigned R14 = ARC::R14; 
MachineFunctionInfo &MFI = MF.getInfo(); if (!MFI.shouldRestoreCalleeSavedRegisters()) { return false; } int FI = MF.getFrameInfo().CreateStackObject(4, 4, false); for (const MCPhysReg *CSRegs = ARCRegisterInfo::getCalleeSavedRegs(); *CSRegs != 0; ++CSRegs) { unsigned Reg = *CSRegs; if (!ARCRegisterInfo::isLegalForClass(Reg, ARC::ARCREG_I32)) continue; const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg); unsigned SubIdx = ARCRegisterInfo::getSubRegIndex(RC, Reg); unsigned SubReg = ARCRegisterInfo::getSubReg(Reg, SubIdx); BuildMI(MBB, MBB.begin(), DebugLoc(), ARCInstrInfo::get(ARC::LDRi2), SubReg) .addFrameIndex(FI).addImm(0); FI += 4; } MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DebugLoc(), ARCInstrInfo::get(ARC::MOVAr2B)) .addReg(R14); AddDefaultCC(MIB); return true; }" LLVM,ARC,31,"bool spillCalleeSavedRegisters(MachineInstr *MI){ Register R14 = ARC::R14; const MCPhysReg *SavedRegs = MF.getCalleeSavedRegs(); for (unsigned i = 0; SavedRegs[i] != 0; ++i) { if (SavedRegs[i] != R14) { if (!storeRegToStackSlot(MI, i, true)) { return false; } } } return true; }" LLVM,ARC,32,bool hasFP(const Function &F) { return true; } LLVM,ARC,33,"void assignCalleeSavedSpillSlots(MachineFunction &MF) { MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); MachineRegisterInfo &MRI = MF.getRegInfo(); const MCPhysReg *CSRegs = MF.getSubtarget().getCalleeSavedRegs(&MF); std::map VVRCalleeSavedMap; for (unsigned i = 0; CSRegs[i] != 0; ++i) { if (!MRI.isAllocatable(CSRegs[i])) continue; MCPhysReg VVR = TRI->getVVRForCalleeSavedReg(CSRegs[i]); if (VVR != 0) VVRCalleeSavedMap[VVR] = CSRegs[i]; } for (auto &it : VVRCalleeSavedMap) { MCRegister VVR = MRI.getUniqueVReg(it.first); MCRegister CSReg = MRI.getUniqueVReg(it.second); MachineObject *VVRStackObject = MFI.CreateStackObject(RC); MRI.setSpillSlot(VVR, VVRStackObject); MachineObject *CSRegStackObject = MFI.getObjectForVReg(CSReg); MRI.setSpillSlot(CSReg, CSRegStackObject); } }" LLVM,ARC,34, LLVM,ARC,35, LLVM,ARC,36, LLVM,ARC,37,"void processFunctionBeforeFrameFinalized(MachineFunction &MF) { int stackSize = MF.getFrameInfo().getStackSize(); std::cout << ""Current stack size: "" << stackSize << ""\n""; const TargetRegisterClass *GPR32RegClass = ARC::GPR32RegClass; const TargetRegisterClass *RegScavFI = MF.getRegInfo().createScavengingIndex(GPR32RegClass); std::cout << ""Created scavenging index RegScavFI="" << RegScavFI << ""\n""; }" LLVM,ARC,38,"bool restoreCalleeSavedRegisters() { unsigned savedRegs[] = {ARC::R14}; if (savedRegs[0] == 0) return true; for (unsigned i = 0; i < savedRegs.size(); i++) { unsigned reg = savedRegs[i]; MachineInstr *loadInst = BuildMI(*BB, InsertPt, DL, TII->get(ARC::LD), ARC::SP) .addReg(reg) .addImm(0); BB->insert(InsertPt, loadInst); } return false; }" LLVM,ARC,39,"namespace { struct SpillCalleeSavedRegisters : public MachineFunctionPass { static char ID; SpillCalleeSavedRegisters() : MachineFunctionPass(ID) {} bool runOnMachineFunction(MachineFunction &MF) override; }; } bool SpillCalleeSavedRegisters::runOnMachineFunction(MachineFunction &MF) { bool modified = false; for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { if (MI.isCall()) { for (MachineOperand &MO : MI.operands()) { if (MO.isRegMask()) { for (unsigned Reg : MO.getRegMask()) { if (TargetRegisterInfo::isCalleeSaved(Reg)) { BuildMI(MBB, MI, DebugLoc(), MF.getSubtarget().getInstrInfo()->get(ARC::STW)) .addReg(ARC::R14) .addReg(Reg, RegState::Kill); modified = true; } } break; } } } } } 
return modified; }" LLVM,ARC,40,"void printInst(const MCInst& Inst, raw_ostream& OS, const MCAsmInfo& MAI){ if(Inst.getOpcode() == ARC::ARC){ OS << ""ARC ""; } else if(Inst.getOpcode() == ARC::ARC){ OS << ""ARC ""; } Inst.print(OS, &MAI); OS << ""\n""; }" LLVM,ARC,41,"void printInst(MCInst Inst, raw_ostream &OS) { if(Inst.getOpcode() == ARC::ARC) { OS << ""ARC""; } else if (Inst.getOpcode() == ARC::ARC) { OS << ""ARC""; } else { OS << Inst.getOpcode(); } for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i) { OS << ' '; const MCOperand &Op = Inst.getOperand(i); if (Op.isReg()) OS << getRegisterName(Op.getReg()); else if (Op.isImm()) OS << formatImm(Op.getImm()); else if (Op.isFPImm()) OS << formatFPImm(Op.getFPImm()); else if (Op.isExpr()) OS << *Op.getExpr(); else if (Op.isInst()) OS << *Op.getInst(); else llvm_unreachable(""Invalid operand kind""); } }" LLVM,ARC,42, LLVM,ARC,43,"bool analyzeBranch(MachineBasicBlock &MBB) { MachineInstr &MI = MBB.back(); for (int i = 0; i < MI.getNumOperands(); i++) { MachineOperand &MO = MI.getOperand(i); if (MO.isTargetSpecific()) { if (MO.getTargetFlags() == ARC) { return true; } if (MO.getTargetFlags() == ""FBB should have been null."") { return true; } } } return false; }" LLVM,ARC,44," unsigned int SPAdj, const DebugLoc &DL) { unsigned int TargetValue = ARC::LD_rs9; const TargetRegisterClass *GPR32RegClass = ARC::GPR32RegClass; if (RegisterClass != GPR32RegClass) { report_fatal_error(""Only support GPR32 register class for loading from stack""); } MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, DL, get(TargetValue)); MIB.addReg(RegisterClass); MIB.addFrameIndex(FrameIndex); MIB.addImm(SPAdj); unsigned int RegisterIndx = RegisterClass->getNumRegs() - 1; MIB.addReg(RegisterIndx); dbgs() << ""Created load reg="" << RegisterIndx << "" from FrameIndex="" << FrameIndex << ""\n""; MI.getParent()->insert(MI, MIB); return RegisterIndx; }" LLVM,ARC,45,"void storeRegToStackSlot(ARC::GPR32RegClass regClass, unsigned reg, int frameIndex) { assert(regClass.hasSubClassEq(ARC::GPR32RegClass) && ""Only support GPR32 stores to stack now""); assert(frameIndex % 4 == 0 && ""Only support 4-byte stores to stack now""); MachineFunction &MF = *(ARCMachineFunctionInfo *)Arg; MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); DebugLoc DL; MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(ARC::ST_rs9)) .addReg(frameIndex) .addImm(0) .addReg(reg); MIB->addMemOperand(MF.getMachineMemOperand( MachinePointerInfo(), MachineMemOperand::MOStore,4, Align(4))); MBB.insert(MBB.end(), MIB.getInstr()); MFI.setObjectAlignment(frameIndex, Align(4)); MFI.setStackHasCalls(true); const TargetRegisterClass *SizeRC = RC->getSubClass(Size); MF.getRegInfo().setRegClass(frameIndex, SizeRC); LLVM_DEBUG(MF.dump()); LLVM_DEBUG(MBB.dump()); LLVM_DEBUG(MIB->dump()); LLVM_DEBUG(dbgs() << "" to FrameIndex="" << frameIndex << ""\n""); }" LLVM,ARC,46," const MachineMemOperand *MMO) const { DEBUG(dbgs() << ""Creating load instruction for reg="" << TRI->getName(DestReg) << "" from frame index="" << FI << ""\n""); DebugLoc DL = MBB.findDebugLoc(MI); MachineFunction *MF = MBB.getParent(); auto &MFI = MF->getFrameInfo(); assert(RC && ""Invalid register class""); if (RC->getSize() != 4) { llvm_unreachable(""Only support 4-byte loads from stack now.""); } if (RC != &ARC::GPR32RegClass) { llvm_unreachable(""Only support GPR32 stores to stack now.""); } if (!MFI.isFixedObjectIndex(FI)) { llvm_unreachable(""Invalid frame index.""); }" 
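Several of the surrounding ARC entries (storeRegToStackSlot / loadRegFromStackSlot) spell the spill code out in mutually inconsistent ways. For reference, here is a minimal sketch of a 4-byte GPR32 spill, assuming the ST_rs9 opcode and GPR32 register class those entries already mention; the exact virtual-function signature varies between LLVM releases, so treat this as an illustrative sketch rather than the ARC backend's actual code:

void ARCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register SrcReg, bool IsKill, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = (I != MBB.end()) ? I->getDebugLoc() : DebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  assert(RC == &ARC::GPR32RegClass && "only 4-byte GPR32 spills handled here");
  // Record that this instruction stores to the fixed stack slot, so later
  // passes can reason about the memory access.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex),
      MFI.getObjectAlign(FrameIndex));
  // ST_rs9: store SrcReg to [frame-index + 0]; the frame index is rewritten
  // to a concrete base register and offset later, in eliminateFrameIndex.
  BuildMI(MBB, I, DL, get(ARC::ST_rs9))
      .addReg(SrcReg, getKillRegState(IsKill))
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addMemOperand(MMO);
}

The matching reload is symmetric: LD_rs9 with the register added as a definition rather than a killed use.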
LLVM,ARC,47," const DebugLoc &DL, unsigned SrcReg, int FrameIndex) { const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering(); MachineFrameInfo &MFI = MF.getFrameInfo(); if (!ARC::GPR32RegClass->contains(SrcReg)) { report_fatal_error(""Invalid GPR32 register""); } int Offset = MFI.getObjectOffset(FrameIndex); Offset += TFI.getOffsetOfLocalArea() + MFI.getStackSize(); if (MFI.getObjectSize(FrameIndex) != 4) { report_fatal_error(""Only support 4-byte stores to stack now.""); } MachineMemOperand::Flags MMOFlags; if (TRI.isStoreToStackSlotLegal(MF, SrcReg, FrameIndex)) { MMOFlags = MachineMemOperand::MOStore; } else { report_fatal_error(""Couldn't get MachineMemOperand for store to stack.""); } BuildMI(MBB, MBBI, DL, TII.get(ARC::ST_rs9)) .addReg(SrcReg, RegState::Kill) .addFrameIndex(FrameIndex) .addImm(0) .addMemOperand(MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FrameIndex), MMOFlags, 4)); dbgs() << ""Created store reg="" << TRI.getName(SrcReg) << "" to FrameIndex="" << FrameIndex << ""\n""; }" LLVM,ARC,48," int FrameIndex) { unsigned LoadOpcode = ARC::LD_rs9; const TargetRegisterClass *RC = &ARC::GPR32RegClass; if (!RC->contains(DestReg)) { report_fatal_error(""Can only load to a GPR32 register""); } const ARCSubtarget &Subtarget = MF.getSubtarget(); const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo(); int64_t FrameOffset = MF.getFrameInfo().getObjectOffset(FrameIndex); Register SPReg = TRI.getStackRegister(); Register TempReg = TRI.getMatchingSuperReg(DestReg, ARC::sub_r0_0, &ARC::GPR32RegClass); BuildMI(MBB, MBBI, DebugLoc(), TII.get(LoadOpcode), DestReg) .addReg(SPReg) .addImm(FrameOffset) .addReg(TempReg); dbgs() << ""Created load "" << printReg(DestReg, TRI) << "" from FrameIndex "" << FrameIndex << ""\n""; }" LLVM,ARC,49,"void storeRegToStackSlot(MachineFunction& MF, Register Reg, int FrameIndex) { if (!ARC::GPR32RegClass.contains(Reg)) { errs() << ""Only support GPR32 stores to stack now.\n""; return; } MachineFrameInfo& MFI = MF.getFrameInfo(); const TargetInstrInfo* TII = MF.getSubtarget().getInstrInfo(); MachineRegisterInfo& MRI = MF.getRegInfo(); int64_t Offset = MFI.getObjectOffset(FrameIndex); MachineBasicBlock& MBB = MF.front(); MachineBasicBlock::iterator MBBI = MBB.begin(); DebugLoc DL = MBBI != MBB.end() ? 
MBBI->getDebugLoc() : DebugLoc(); unsigned StoreOpcode = TII->getOpcode(ARC::ST_rs9); MachineInstrBuilder MIB = BuildMI(MF, DL, TII->get(StoreOpcode)); MIB.addReg(Reg); MIB.addFrameIndex(FrameIndex); MIB.addImm(0); MIB.addMemOperand(MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore, 4, 4)); MBB.insert(MBBI, &*MIB); }" LLVM,ARC,50, LLVM,ARC,51,"bool getBaseAndOffsetPosition(const MachineInstr *MI, unsigned &BaseReg, int64_t &Offset) { unsigned ARC = MI->getOperand(0).getImm(); if (ARC != 1 || ARC != 2) { return false; } BaseReg = MI->getOperand(1).getReg(); Offset = MI->getOperand(2).getImm(); if (!TargetRegisterInfo::isPhysicalRegister(BaseReg)) { return false; } if (Offset > INT_MAX || Offset < INT_MIN) { return false; } return true; }" LLVM,ARC,52,unsigned int ARC::getInstSizeInBytes(const MachineInstr *MI) const { switch (MI->getOpcode()) { default: return 0; case ARC::ADD_rr: case ARC::AND_rr: case ARC::CMP_rr: case ARC::CMP_rq: case ARC::CMP_rqh: case ARC::D32ADD_rr: case ARC::D32AND_rr: case ARC::D32OR_rr: case ARC::D32SUB_rr: case ARC::D32XOR_rr: case ARC::LDDR_rr: case ARC::LDW_rp: case ARC::MOV_rr: case ARC::MOVS_rr: case ARC::MOVD_rr: case ARC::OR_rr: case ARC::SUB_rr: case ARC::XOR_rr: return 4; case ARC::ADD_krr: case ARC::AND_krr: case ARC::CMP_krr: case ARC::D32ADD_krr: case ARC::D32AND_krr: case ARC::D32OR_krr: case ARC::D32SUB_krr: case ARC::D32XOR_krr: case ARC::LDR_rkp: case ARC::LDR_rrp: case ARC::LDW_rq: case ARC::MUL_rr: case ARC::MUL_qrr: case ARC::MUL_qrp: case ARC::MUL_qrph: case ARC::NOP: case ARC::OR_krr: case ARC::SWIX_rq: case ARC::SUB_krr: case ARC::XOR_krr: return 8; case ARC::BR: case ARC::J: return 12; case ARC::CASrr: case ARC::LDDR_rq: case ARC::MOVh_rr: case ARC::SWI: return 16; } } LLVM,ARC,53, LLVM,ARC,54,"bool isPostIncrement(const TargetInstrInfo *TII, const MachineInstr *MI) { unsigned int opcode = MI->getOpcode(); return TII->isPostIncrementOpcode(opcode); }" LLVM,ARC,55,"void copyPhysReg(bit RC, bit SRC, bit DST) { if (RC == ARC::GPR32RegClass) { if (SRC == ARC::GPR32RegClass && DST == ARC::GPR32RegClass) { ARC::MOV_rr MI = (ARC::MOV_rr)target specific value; MI.setSrcReg(SRC); MI.setDstReg(DST); BuildMI(MBB, I, DL, get(ARC::MOV_rr)).addReg(SRC).addReg(DST); } else { llvm_unreachable(""Only GPR32 src/dest copy supported.""); } } else { llvm_unreachable(""Only GPR32 src/dest copy supported.""); } }" LLVM,ARC,56,"void insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *FallThrough, int Cond, MachineBasicBlock *Dest, unsigned Reg1, unsigned Reg2, unsigned Opc) { if (!CodeGenFuncInfo.hasBigCode() || CodeGenFuncInfo.isThumbFunction()) { report_fatal_error(""Code size not handled.""); } if (!FallThrough) { report_fatal_error(""insertBranch must not be told to insert a fallthrough.""); } if (Cond == ARC::BR && ((Reg1 & ARC::BRcc_ru6_p) || (Reg2 & ARC::BRcc_rr_p))) { report_fatal_error(""ARC branch conditions have two components!""); } MachineInstrBuilder MIB = BuildMI(&MBB, MBB.end(), MBB.findDebugLoc(MBB.end()), TII->get(Opc)); MIB.addMBB(FallThrough); MIB.addMBB(Dest); MIB.addReg(Reg1); if (Opc == ARC::BRcc_rr_p || Opc == ARC::BRrr_p || Opc == ARC::BRcc_rr) { MIB.addReg(Reg2); } MIB.addImm(Cond); MBB.insert(MBB.end(), MIB); }" LLVM,ARC,57,"void loadImmediate(CodeGenFunction &CGF, llvm::Value *Dest, llvm::APInt ConstantValue, llvm::DebugLoc Loc) { llvm::Type *Ty = Dest->getType(); llvm::ConstantInt *ConstInt = llvm::ConstantInt::get(Ty, ConstantValue); llvm::SmallVector Args; Args.push_back(Dest); Args.push_back(ConstantInt); 
llvm::CallInst::Create(ARCRuntime::getARCLongImmediateFn(), Args, """", CGF.Builder.GetInsertBlock()); if (CGF.CGM.getCodeGenOpts().getDbgInfoKind() != llvm::CodeGenOptions::NoDebugInfo) { llvm::MDBuilder MDB(CGF.getLLVMContext()); llvm::MDNode *DebugLoc = MDB.createDebugLocation(Loc.getLine(), Loc.getCol(), CGF.EmitFunction(unit).getSubprogram()); llvm::CastInst *CI = llvm::cast(CGF.Builder.GetInsertPoint()); CI->setDebugLoc(DebugLoc); } if (Ty != Dest->getType()) { llvm::CastInst *CI = llvm::CastInst::CreatePointerCast(Dest, Ty, """", CGF.Builder.GetInsertBlock()); CGF.Builder.Insert(CI); Dest = CI; } CGF.Builder.CreateStore(Dest, CGF.ReturnValue); }" LLVM,ARC,58,"void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIdx, const TargetRegisterClass *RC) { assert(RC == &ARC::GPR32RegClass && ""Only support GPR32 stores to stack now.""); assert(FrameIdx != 0 && ""Frame index cannot be zero.""); unsigned SP = ARC::SP; BuildMI(MBB, MI, MI->getDebugLoc(), get(ARC::LD_rs9), DestReg) .addReg(SP).addImm(FrameIdx).addMemOperand(MBB->getParent()->getMachineMemOperand( MachinePointerInfo::getFixedStack(FrameIdx), MachineMemOperand::MOLoad, 4, alignof(uint32_t))); dbgs() << ""Created load reg="" << printReg(DestReg) << "" from FrameIndex="" << FrameIdx << ""\n""; }" LLVM,ARC,59,bool reverseBranchCondition(ARCCC::CondCode &Cond) { if (!ARCCC::isBranchCondition(Cond)) { return true; } switch (Cond) { case ARCCC::EQ: Cond = ARCCC::NE; break; case ARCCC::NE: Cond = ARCCC::EQ; break; case ARCCC::EQ_A: Cond = ARCCC::NE_A; break; case ARCCC::NE_A: Cond = ARCCC::EQ_A; break; case ARCCC::EQ_N: Cond = ARCCC::NE_N; break; case ARCCC::NE_N: Cond = ARCCC::EQ_N; break; case ARCCC::LO: Cond = ARCCC::HS; break; case ARCCC::LO_N: Cond = ARCCC::HS_N; break; case ARCCC::HS: Cond = ARCCC::LO; break; case ARCCC::HS_N: Cond = ARCCC::LO_N; break; case ARCCC::HI: Cond = ARCCC::LS; break; case ARCCC::HI_N: Cond = ARCCC::LS_N; break; case ARCCC::LS: Cond = ARCCC::HI; break; case ARCCC::LS_N: Cond = ARCCC::HI_N; break; default: return true; } return false; } LLVM,ARC,60,"void storeRegToStackSlot(MachineFunction &MF, MachineBasicBlock &MBB, MachineInstr &MI, const TargetRegisterClass *RC, Register Reg, int FrameIndex) { MachineFrameInfo &MFI = MF.getFrameInfo(); MachineConstantPool *MCP = MF.getConstantPool(); const ARCSubtarget &STI = MF.getSubtarget(); assert(STI.isARCArch() && ""Target machine does not support ARC architecture""); MachineRegisterInfo &MRI = MF.getRegInfo(); const ARCInstrInfo *TII = STI.getInstrInfo(); const MachineMemOperand *MMO = MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore, MFI.getStackSize(), MFI.getObjectAlignment(FrameIndex), MachineMemOperand::MONone); if (RC != ARC::GPR32RegClass) { LLVM_DEBUG(dbgs() << ""Store to stack not supported for register class"" << ""\n""); return; } if (!ARC::GPR32RegClass->contains(Reg)) { LLVM_DEBUG(dbgs() << ""Only support GPR32 stores to stack now."" << ""\n""); return; }" LLVM,ARC,61," const DebugLoc &DL) { MachineFunction &MF = MBB.getParent(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); unsigned LoadOp = 0; if (RegClass->hasSubClassEq(&ARC::GPR32RegClass)) LoadOp = ARC::LD_rs9; MachineInstrBuilder LoadMI = BuildMI(MBB, MBB.end(), DL, TII.get(LoadOp)) .addReg(RegClass->getRegister(0)) .addFrameIndex(SlotIndex) .addImm(0); const MCInstrDesc &MCID = LoadMI->getDesc(); if 
(MCID.hasImplicitDefOfPhysReg(ARC::GPR32RegClass.getRegister(0))) LoadMI.addRegMask(TRI->getSubRegMask(ARC::g21)); DEBUG(LoadMI->print(errs())); return LoadMI; }" LLVM,ARC,62," MachineFunction &MF, const TargetRegisterInfo *MRI, MachineFrameInfo &MFI) { if (RC != &ARC::GPR32RegClass) { LLVM_DEBUG(dbgs() << ""Could not store register "" << printReg(Reg, MRI) << "" to stack: Unsupported register class.\n""); return false; } if (!MFI.isFrameIndexLegal(FrameIndex)) { LLVM_DEBUG(dbgs() << ""Could not store register "" << printReg(Reg, MRI) << "" to stack: Invalid stack frame index.\n""); return false; } unsigned StoreSize = 4; const MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo(), MachineMemOperand::MOStore, StoreSize, StoreSize); MachineInstrBuilder StoreMI = BuildMI(MF, DebugLoc(), TII.get(ARC::ST_rs9)) .addDef(ARC::rs9) .addFrameIndex(FrameIndex) .addImm(0) .addMemOperand(MMO); StoreMI->findRegisterDefOperand(ARC::rs9, true, false); LLVM_DEBUG(dbgs() << ""Created store reg="" << printReg(Reg, MRI) << "" to FrameIndex="" << FrameIndex << ""\n""); return true; }" LLVM,ARC,63, LLVM,ARC,64, LLVM,ARC,65,unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (!MI->getDesc().isPseudo()) { return MI->getDesc().getSize(); } else { for (const MachineOperand &MO : MI->operands()) { if (MO.isImm()) { unsigned int Size = MO.getImm(); if (Size > 0) { return Size; } } } return ARC::ARCInstSize; } } LLVM,ARC,66,TargetInstrInfo getRegisterInfo(unsigned Reg) { TargetInstrInfo RegisterInfo; if (Reg >= ARC::R0 && Reg <= ARC::SP) { RegisterInfo.ARC = true; RegisterInfo.ARCRegister = Reg; } else { RegisterInfo.ARC = false; RegisterInfo.ARCRegister = ARC::NoRegister; } return RegisterInfo; } LLVM,ARC,67,"void insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TargetBB, DebugLoc DL) { MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(ARC::BR)); MIB.addMBB(TargetBB); MIB.addImm(1); MBB.addSuccessor(TargetBB); MBB.updateTerminator(); }" LLVM,ARC,68,"unsigned int isLoadFromStackSlot(const MachineInstr* MI){ assert(MI && ""null machine instruction""); if(MI->getOpcode() == ARC::LD16FI || MI->getOpcode() == ARC::LD32FI || MI->getOpcode() == ARC::LD64FI){ unsigned int Reg = MI->getOperand(0).getReg(); unsigned int FrameIndex = MI->getOperand(1).getIndex(); return (ARC, ARC, 1, 2, 2, 1, 0, 0) + Reg + FrameIndex; } return 0; }" LLVM,ARC,69,bool isStore(Recipe &R) { if (R.mayStoreMemory()) { const ARCSubtarget *Subtarget = R.getParent()->getSubtarget(); ARCII::UnitMemOp Type = R.getAsmWriter().getSUnitMemOpIndex(); if (Type == ARCII::ST || Type == ARCII::ST_arb_rs9 || Type == ARCII::STH || Type == ARCII::STH_arb_rs9 || Type == ARCII::STB || Type == ARCII::STB_arb_rs9) { return true; } } return false; } LLVM,ARC,70,"bool isStoreToStackSlot(const MachineInstr *MI, unsigned &SourceReg, int &FrameIndex) { if (MI->getOpcode() != ARC::STRI32) return false; SourceReg = MI->getOperand(1).getReg(); if (!ARC::ACRMaskRegClass.contains(SourceReg)) return false; int64_t Offset = MI->getOperand(2).getImm(); if (Offset != 0) return false; FrameIndex = MI->getOperand(0).getIndex(); return true; }" LLVM,ARC,71,"void loadImmediate(ARMFunction &Function, REG ArcReg, int Imm) { ARMBuilder Builder(Function); int ImmMin = -255; int ImmMax = 255; REG tmpReg = ARC::R0; if (Imm > ImmMax || Imm < ImmMin) { Builder.buildMov(tmpReg).addImm(Imm); Imm = 0; } Builder.buildInstr(ARC::MOV_rs12).addReg(ArcReg).addImm(Imm); Builder->addAnnotation(""Need Arc long immediate instructions.""); if (tmpReg != ARC::R0) { 
Builder.buildInstr(ARC::FREE).addReg(tmpReg); } }" LLVM,ARC,72," const TargetRegisterClass *RC) { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); int FrameOffset = MFI.getObjectOffset(FrameIndex); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(ARC::LD_rs9)) .addReg(DestReg, RegState::Define) .addImm(0) .addImm(FrameOffset); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack( MF, FrameIndex), MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), MFI.getObjectAlign(FrameIndex)); MIB.addMemOperand(MMO); DEBUG(dbgs() << ""Created load reg="" << PrintReg(DestReg) << "" from FrameIndex="" << FrameIndex << ""\n""); }" LLVM,ARC,73,void removeBranch(MachineBasicBlock *MBB) { MachineInstr *LastInst = MBB->getFirstTerminator(); if (LastInst->isBranch()) { MBB->erase(LastInst); if (MBB->succ_empty()) { MBB->getParent()->erase(MBB); } else { MBB->removeSuccessor(MBB->succ_size() - 1); } } } LLVM,ARC,74, LLVM,ARC,75,"void storeRegToStackSlot(ARCFG &ARC, unsigned Reg, int FrameIndex) { const TargetRegisterClass *RC = ARC::GPR32RegClass; MachineFunction &MF = ARC.getMachineFunction(); if (!RC->hasSubClassEq(&ARC::GPR32RegClass)) { LLVM_DEBUG(errs() << ""Only support GPR32 stores to stack now.\n""); MF.getMMI().setARCCachedFrameInfo(MF, nullptr, false); return; } if (MF.getSubtarget().isThumb()) { LLVM_DEBUG(errs() << ""Only support 4-byte stores to stack now.\n""); MF.getMMI().setARMCachedFrameInfo(MF, nullptr, false); return; } if (FrameIndex >= 0) { LLVM_DEBUG(errs() << ""Cannot store to positive stack frame index.\n""); return; } MachineFrameInfo &MFI = MF.getFrameInfo(); if (!MFI.isFixedObjectIndex(FrameIndex)) { LLVM_DEBUG(errs() << ""Frame index "" << FrameIndex << "" is not a fixed index.\n""); return; } int64_t Size = MFI.getObjectSize(FrameIndex); if (Size > 4) { LLVM_DEBUG(errs() << ""Only support 4-byte stores to stack now.\n""); return; } MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(Size), AAMDNodes(), nullptr, SynchronizeScope::System, SynchronizeScope::System ); if (!MMO) { LLVM_DEBUG(errs() << ""Couldn't get MachineMemOperand for store to stack.\n""); return; }" LLVM,ARC,76, LLVM,ARC,77,"void Select(Node *N, InstructionList &Instructions) { switch (N->getOpcode()) { case ARC::MOV_rs12: { MachineInstr *MI = BuildMI(Instructions, N->getDebugLoc(), TII->get(ARC::MOV_rs12)) .addUse(N->getOperand(0)) .addUse(N->getOperand(1)) Instructions.push_back(MI); break; } case ARC::MOV_rlimm: { MachineInstr *MI = BuildMI(Instructions, N->getDebugLoc(), TII->get(ARC::MOV_rlimm)) .addUse(N->getOperand(0)) .addImm(N->getOperand(1).getImm()) Instructions.push_back(MI); break; } case ARC::ARC: { MachineInstr *MI = BuildMI(Instructions, N->getDebugLoc(), TII->get(ARC::ARC)) .addUse(N->getOperand(0)) .addUse(N->getOperand(1)) Instructions.push_back(MI); break; } case ISD::Constant: { unsigned int Val = N->getConstantOperandVal(0); MachineInstr *MI = BuildMI(Instructions, N->getDebugLoc(), TII->get(ARC::ARC)) .addImm(Val); MI->addReg(ARC::R0); Instructions.push_back(MI); break; } } }" LLVM,ARC,78," SelectionDAG &DAG) { SDValue ReturnVal = DAG.getLoad(MVT::i32, Chain, Outs.DstAddr, DAG.getPointerTy(), Outs.AM); SDValue Ops[] = { ReturnVal, Chain }; SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Ret = DAG.getNode(ARCISD::RET, SDLoc(Chain), VTs, Ops); DAG.ReplaceAllUsesWith(Chain, Ret); }" LLVM,ARC,79, LLVM,ARC,80,"SDValue ReplaceNodeResults(SDValue N, 
const SDValue *Results, unsigned NumResults) { if (N.getValueType().getTypeForEVT(N.getValueType()) == MVT::Other) { SDValue *newValues = BuildCustomValues(N, Results, NumResults); SDValue *newResults = new SDValue[NumResults]; for (unsigned int i = 0; i < NumResults; i++) { newResults[i] = newValues[i]; } delete newValues; return newResults; } return Results; }" LLVM,ARC,81,"bool CanLowerReturn(const SmallVectorImpl &Outs, LLVMContext &Context) { if (Outs.size() == 1 && Outs[0].Flags.isByVal() && Outs[0].ByValType.getSizeInBits() == 16 && Outs[0].OriginalAlignment == 0) { if (Outs[0].VT == MVT::ARC) { if (Outs[0].Flags[i] != 0) { return true; } } } return false; }" LLVM,ARC,82,"function getTargetNodeName(instruction) { if (instruction == ARC) { return ""ARC""; } else if (instruction == ARCISD::BL) { return ""ARCISD::BL""; } else if (instruction == ARCISD::CMOV) { return ""ARCISD::CMOV""; } else if (instruction == ARCISD::CMP) { return ""ARCISD::CMP""; } else if (instruction == ARCISD::BRcc) { return ""ARCISD::BRcc""; } else if (instruction == ARCISD::RET) { return ""ARCISD::RET""; } else if (instruction == ARCISD::GAWRAPPER) { return ""ARCISD::GAWRAPPER""; } else { return ""Target specific value not found""; } }" LLVM,ARC,83,"bool isLegalAddressingMode(unsigned AM, unsigned LoadStoreType) { switch(LoadStoreType) { case ARC: if (AM == ARC || AM == 0) { return true; } break; default: return false; } }" LLVM,ARC,84, ArrayRef Ins) { MachineFrameInfo &MFI = MF.getFrameInfo(); const ARCRegisterInfo *RegInfo = static_cast( MF.getSubtarget().getRegisterInfo()); MachineRegisterInfo &MRI = MF.getRegInfo(); unsigned NumArgs = Ins.size(); if (NumArgs == 0) return; unsigned ArgSize = 0; for (MachineArgument Arg : Ins) { if (Arg.ArgType == MachineArgument::MACHINE_VALARG) { ArgSize += RegInfo->getPointerSize(); } else { continue; } } LLVM,ARC,85, LLVM,ARC,86,"void LowerReturn(SDValue Chain, const ARCFunctionInfo *AFI, SmallVectorImpl &Outs, SDValue Ret, const SDLoc &dl, SelectionDAG &DAG) { const Function *F = DAG.getMachineFunction().getFunction(); CallingConv::ID CC = F->getCallingConv(); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); SDVTList NodeTys = DAG.getVTList(Ret.getValueType()); SDValue FrameOffset = DAG.getFrameIndex(AF.getARCRetAddr().FI); SDValue Args[] = { Chain, Ret, FrameOffset, DAG.getConstant(4, dl, PtrVT), Ret, DAG.getConstant(1, dl, PtrVT), DAG.getConstant(4, dl, PtrVT), DAG.getConstant(0, dl, PtrVT), DAG.getTargetConstant(MVT::Other, PtrVT), DAG.getValueType(MVT::Other), DAG.getTargetConstant(0, PtrVT), }; SDValue RV = DAG.getNode(ARCISD::RET, dl, NodeTys, Args); Chain = RV.getValue(0); }" LLVM,ARC,87,"bool mayBeEmittedAsTailCall(const Target& target) { if (target == ARC || target == ARC,ARC) { return true; } else { return false; } }" LLVM,ARC,88, LLVM,ARC,89, LLVM,ARC,90,void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); AU.addRequiredID(ARC); AU.addRequired(); } LLVM,ARC,91,"function getPassName(pass) { const passName = pass.toLowerCase().replace(/[^\w\s]/g, ''); const arcPassName = passName + ""arc""; return arcPassName; }" LLVM,ARC,92,"void runOnMachineFunction(MachineFunction &MF) { MF.setTargetTriple(""arc""); MF.setTargetCPU(""ARC""); MF.setTargetFeature(""ARC""); EmitFunctionBody(MF); }" LLVM,ARC,93," MachineBasicBlock::iterator MI) { MachineInstr &Instr = *MI; for (unsigned int i = 0, e = Instr.getNumOperands(); i != e; ++i) { MachineOperand &Op = Instr.getOperand(i); if (Op.isFI()) { 
Instr.ReplaceFI(MF.getFrameInfo()->getObjectIndex(Op.getIndex())); } } }" LLVM,ARC,94,"bool needsFrameMoves(const ARCSubtarget *target, const MachineFunction &func, const ARCFunctionInfo &state) { if (target->needsFrameMoves()) return true; if (func.getFunction()->hasFnAttribute(""arc_requires_frame_moves"")) return true; else if (func.getFunction()->hasFnAttribute(""arc_no_frame_moves"")) return false; if (state.propertyRequiresFrameMoves()) return true; return false; }" LLVM,ARC,95,"void eliminateFrameIndex(MachineInstr &MI) { for (MachineInstr::mop_iterator it = MI.operands_begin(), e = MI.operands_end(); it != e; ++it) { if (it->isFI()) { unsigned int frameIndex = it->getIndex(); int offset = it->getOffset(); MachineOperand newOperand = MachineOperand::CreateImm(offset); MachineInstr::mop_iterator newIt = MI.addOperand(newOperand); MI.setDesc(TII.get(MI.getOpcode()-ARC)); MI.RemoveOperand(newIt); } } }" LLVM,ARC,96,"void eliminateFrameIndex(MachineInstr* MI, unsigned Idx, unsigned FrameIndex, StackSize size, LocalFrameSize localSize) { if (MI->isUsingFrameIndex(Idx)) { if (FrameIndex == 0) { MI->ReplaceFrameIndex(Idx, 0, size); } else { MI->ReplaceFrameIndex(Idx, 0, localSize); } } }" LLVM,ARC,97, LLVM,ARC,98,"void eliminateFrameIndex(MachineFunction &MF, MachineBasicBlock &MBB,MachineBasicBlock::iterator MI) { for (MachineBasicBlock::iterator I = MF.begin(), E = MF.end(); I != E; ++I) { int FrameIdx = -1; if (MI->isMoveImmediate()) { for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { if (I->getOperand(i).isFI()) { FrameIdx = I->getOperand(i).getIndex(); break; } } if (FrameIdx >= 0) { I = MBB.erase(I); --I; } } }" LLVM,ARC,99, LLVM,ARC,100,"void eliminateFrameIndex(MachineInstr *MI, uint64_t TFI, int &Opcode, unsigned &DstReg, unsigned &SrcReg, unsigned &Src2Reg, const TargetInstrInfo *TII) { if (SrcReg == TFI) { int SPIdx = MI->findRegisterUseOperandIdx(M86::SP); assert(SPIdx != -1 && ""SP operand not found in instruction""); int64_t Offset = MI->getOperand(SPIdx).getImm(); MI->getOperand(SPIdx).ChangeToImmediate(Offset + Src2Reg); } else if (Src2Reg == TFI) { int FPIdx = MI->findRegisterUseOperandIdx(M86::FP); assert(FPIdx != -1 && ""FP operand not found in instruction""); int64_t Offset = MI->getOperand(FPIdx).getImm(); MI->getOperand(FPIdx).ChangeToImmediate(Offset + SrcReg); } else if (DstReg == TFI) { llvm_unreachable(""Unexpected register operand""); } }" LLVM,ARC,101,"TargetRegisterList ARCBackend::getCalleeSavedRegs(const MachineFunction *MF) const { TargetRegisterList Regs; Regs.addReg(ARC::R13, RegState::Preserved); Regs.addReg(ARC::R14, RegState::Preserved); Regs.addReg(ARC::R15, RegState::Preserved); return Regs; }" LLVM,ARC,102,"llvm::TargetRegisterInfo::CallPreservedMask llvm::Function::getCallPreservedMask(CallConv::ID CC) const { switch (CC) { case CallConv::ARC: return { TargetRegisterInfo::ARC, TargetRegisterInfo::ARC, TargetRegisterInfo::ARC }; default: return TargetRegisterInfo::CallPreservedMask(); } }" LLVM,ARC,103, LLVM,ARC,104,"BitVector getReservedRegs(const ARCSubtarget &Target, const MachineFunction &MF) { BitVector ReservedRegs(ARC::NUM_PHYSICAL_REGISTERS); ReservedRegs.set(ARC::ILINK); ReservedRegs.set(ARC::SP); ReservedRegs.set(ARC::GP); ReservedRegs.set(ARC::R25); ReservedRegs.set(ARC::BLINK); ReservedRegs.set(ARC::FP); return ReservedRegs; }" LLVM,ARC,105,bool needsFrameMoves(const TargetMachine &TM) { if (TM.getTargetTriple().getArch() == Triple::Arc) return true; if (TM.getSubtarget().isARCEnabled()) return true; return false; } 
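The nearby ARC register-info entries (getReservedRegs, requiresRegisterScavenging, useFPForScavengingIndex) each give a fragment of the same picture. Below is a compact sketch of how these hooks usually fit together in an LLVM backend, reusing the reserved-register names the getReservedRegs entry above already lists; the exact reserved set and signatures are illustrative, not the ARC backend's verbatim code:

BitVector ARCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // Registers the allocator must never hand out.
  Reserved.set(ARC::ILINK);
  Reserved.set(ARC::SP);
  Reserved.set(ARC::GP);
  Reserved.set(ARC::FP);
  Reserved.set(ARC::BLINK);
  Reserved.set(ARC::R25);
  return Reserved;
}

bool ARCRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
  // Frame offsets can exceed the small signed immediate range, so keep a
  // register scavenger available for materializing large offsets.
  return true;
}

bool ARCRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  return true;
}

Returning true from requiresRegisterScavenging pairs with the scavenging stack slot that the processFunctionBeforeFrameFinalized entries earlier in this section create.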
LLVM,ARC,106,bool requiresRegisterScavenging(TargetMachine &target) { LLVMTargetMachine *LLVMTarget = dynamic_cast(&target); if (!LLVMTarget) return false; MCRegisterInfo *regInfo = target.getMC().getObjectFileInfo()->getRegisterInfo(); if (!regInfo->getRegScavenger()) return false; if (!target.getIsa() && !target.getIsa()) return false; return true; } LLVM,ARC,107,"void trackLivenessAfterRegAlloc() { if (Target.isARC()) { Target.setTargetSpecificValue(""ARC"", ""ARC""); } bool trackLiveness = false; if (trackLiveness) { Target.setTrackLiveInsAfterRegAlloc(true); } else { Target.setTrackLiveInsAfterRegAlloc(false); } }" LLVM,ARC,108,bool useFPForScavengingIndex(const TargetMachine &TM) { const Triple &T = TM.getTargetTriple(); if (T.getArch() != Triple::arc) { return false; } return true; } LLVM,ARC,109,InferFixedInARCParIsFalseOption(DefaultARCRegisterInfo::Unknown) { ARCArchVersion = TM.getCPU(); ARCArchName = ARM::getARCArch(ArchVersion); TargetABI = ComputeTargetABI(TT.getArch()); HasDivide = false; HasAtomic = false; HasSlowIndirectJumps = false; if (CodeGenFileType == CodeGenFileType::AssemblyFile) HasMovt = true; else if (CodeGenFileType == CodeGenFileType::ObjectFile) HasMovt = false; if (TM.getDataLayout()->getPointSize() == 32) { HasSIT = true; HasGICC2 = false; if (ARM::ArchVersion == ARM::BR) HasSSE = true; else if (ARM::ArchVersion == ARM::BEBE) HasSSse = true; } if (CodeGenFileType == CodeGenFileType::AssemblyFile) DefaultARCRetentionMode = TargetMC::ARCVocab::SLDR; else if (CodeGenFileType == CodeGenFileType::ObjectFile) DefaultARCRetentionMode = TargetMC::ARCVocab::SLDD; else DefaultARCRetentionMode = TargetMC::ARCVocab::SNLD; } LLVM,ARC,110,"LLVM::ARCSubtarget::ARCSubtarget(const Triple &T, const std::string &CPU, const std::string &FS) { NewlibABI = false; LinuxABI = false; ROPI = false; RWPI = false; IsSubtarget = true; ParseSubtargetFeatures(CPU, FS); ProcessTriple(T); } void LLVM::ARCSubtarget::ParseSubtargetFeatures(const std::string &CPU, const std::string &FS) { if (CPU == ""arc700"") { NewlibABI = true; } else { LinuxABI = true; } std::vector FeatureVec; StringRef FSStringRef(FS); FSStringRef.split(FeatureVec, "","", -1, false); for (unsigned i = 0, e = FeatureVec.size(); i < e; ++i) { if (FeatureVec[i] == ""+ropi"") { ROPI = true; } if (FeatureVec[i] == ""+rwpi"") { RWPI = true; } } } void LLVM::ARCSubtarget::ProcessTriple(const Triple &T) { switch (T.getArch()) { default: llvm_unreachable(""Unknown target architecture""); case Triple::arc: ARCArch = Triple::arc; ARCArchName = ""ARC""; Endianness = support::little; ARCArchVersion = ARCArch::ARCv2; break; case Triple::arcv2: ARCArch = Triple::arcv2; ARCArchName = ""ARCv2""; Endianness = support::little; ARCArchVersion = ARCArch::ARCv2; break; case Triple::arcv3: ARCArch = Triple::arcv3; ARCArchName = ""ARCv3""; Endianness = support::little; ARCArchVersion = ARCArch::ARCv3; break; } }" LLVM,ARC,111,ARCSubtarget::ARCSubtarget(Triple) { InitDefaultFeatures(Triple); UseARC = true; ARCArch = ARC; ARCAux = ARC; ARCCtx = ARC; } LLVM,ARC,112,TargetInstrInfo *getInstrInfo(const ARCTargetMachine &TM) { return TM.getSubtargetImpl()->getInstrInfo(); } LLVM,ARC,113,TargetRegisterInfo* getRegisterInfo(TargetInstrInfo* InstrInfo) { if (InstrInfo == nullptr) { return nullptr; } TargetRegisterInfo* ARCRegInfo = new TargetRegisterInfo(ARC); ARCRegInfo->init(InstrInfo); return ARCRegInfo; } LLVM,ARC,114,"namespace llvm { class ARCTargetMachine : public llvm::TargetMachine { std::unique_ptr DL; public: ARCTargetMachine(const 
llvm::Target &T, const llvm::Triple &TT, StringRef CPU, StringRef FS, const llvm::TargetOptions &Options, Optional RM, Optional CM, CodeGenOpt::Level OL, bool JIT); virtual const llvm::TargetSubtargetInfo *getSubtargetImpl(const llvm::Function &) const override; virtual TargetIRAnalysis getTargetIRAnalysis() override; virtual const llvm::DataLayout *getDataLayout() const override { return DL.get(); } virtual CodeGenOpt::Level getOptLevel() const override { return CodeGenOpt::Aggressive; }" LLVM,ARC,115,"void addPreRegAlloc(ARCTargetMachine &TM) { addPreSchedOptimizationPasses(TM); addTargetSpecificPasses(TM, ARC, ARC, ARC); }" LLVM,ARC,116, LLVM,ARC,117, LLVM,ARC,118,"TargetTransformInfo *getTargetTransformInfo(StringRef Target) { if (Target == ""ARC"") { return new ARCTargetTransformInfo(); } return nullptr; }" LLVM,ARC,119,"TargetIRAnalysis* getTargetIRAnalysis(const Triple& TargetTriple) { if (TargetTriple.getArch() == Triple::ARC) { return new TargetIRAnalysis(ARC, ARC, ARC); } else { return new TargetIRAnalysis(); } }" LLVM,ARC,120,"void addInstSelector(TargetMachine &TM, PassManagerBase &PM) { const Triple &TT = TM.getTargetTriple(); if (TT.getArch() != Triple::arc) return; PM.add(createARCMachineInstructionSelectorPass(TM)); }" LLVM,ARC,121,"bool addPreEmitPass(FunctionPassManager &PM, TargetMachine &TM) { if (TM.getTargetTriple().getArch() != Triple::ARC) return false; const ARCSubtarget &Subtarget = TM.getSubtarget(); ARCMachineFunctionInfo *FuncInfo = Subtarget.getMachineFunctionInfo(); PM.addPass(createARCPreEmitPass(Subtarget, FuncInfo)); return true; }" LLVM,ARC,122,"void addPreRegAlloc(TargetPassConfig &TPC, PassManagerBase &PM, const TargetMachine &TM) { } This function takes in three parameters: 1. TargetPassConfig &TPC - This is an instance of the TargetPassConfig class which contains information about the target machine, including its register allocation strategy and available registers. 2. PassManagerBase &PM - This is an instance of the PassManagerBase class which is responsible for managing the passes that are run during the compilation process. 3. const TargetMachine &TM - This is a constant reference to the TargetMachine class which contains information about the target architecture and its features. The purpose of this function is to provide a target-specific implementation of passes that need to be run immediately before the register allocation phase. This allows targets to perform any necessary transformations or optimizations on the code before it is assigned to physical registers. To implement this function, we can use the addTargetPasses method of the TargetPassConfig class. This method takes in a PassManagerBase reference and a reference to the TargetMachine class and is responsible for adding target-specific passes to the pass manager. In the body of the function, we can use the addTargetPasses method to add passes for ARC,ARC,ARC,ARC targets. For example, the following code adds the MyARCPass pass to the pass manager for the ARC target: TPC.addTargetPasses(PM, new MyARCPass(TM)); We can add as many target-specific passes as needed using the addTargetPasses method. Finally, this function needs to be called from the initializeARCPreRegAllocPass function which is responsible for initializing the pre-register-allocation passes for the ARC target. This ensures that our target-specific passes are added to the pass manager before the register allocation phase. 
void initializeARCPreRegAllocPass(PassRegistry &Registry) { addTargetPasses(addPreRegAlloc); }" LLVM,ARC,123,"ARCTargetMachine() { StringRef archName = ""ARC""; StringRef targetFeatures = ""e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-""; StringRef targetTypes = ""f32:32:32-i64:32-f64:32-a:0:32-n32""; return ARC::TargetMachine(archName, ""arc"", targetFeatures, targetType); }" LLVM,ARC,124,PassConfig createPassConfig(ARC &arc) { PassConfig Config; Config.addCodeGenPass(arc.createARCCodeGenPass()); Config.addCodeGenPass(arc.createARCCodeGenPass()); Config.addCodeGenPass(arc.createARCCodeGenPass()); return Config; } LLVM,ARC,125,TargetLoweringObjectFile* ARCTargetMachine::getObjFileLowering() const { return new ARCTargetLoweringObjectFile(); } LLVM,ARC,126,const TargetSubtargetInfo &ARC::getSubtargetImpl(const Function &F) const { return ARCSubtarget; } LLVM,ARC,127,TargetTransformInfo *getTargetTransformInfo(const Triple &T) { ARCTargetTransformInfo *ARCTTI = new ARCTargetTransformInfo(); ARCTTI->setARCVectors(); ARCTTI->setARCMacros(); ARCTTI->setARCFlags(); return ARCTTI; } LLVM,ARC,128,"TargetLowering *getTLI(const TargetMachine &TM, const TargetTransformInfo &TTI) { return new ARCTargetLowering(TM, TTI); }" LLVM,RISCV,0, MCFixup const& Fixup) { switch (Fixup.getKind()) { case RISCV::fixup_riscv_relax_a: case RISCV::fixup_riscv_relax_b: case RISCV::fixup_riscv_relax_c: case RISCV::fixup_riscv_relax_d: case RISCV::fixup_riscv_relax_g: case RISCV::fixup_riscv_relax_h: case RISCV::fixup_riscv_relax_i: case RISCV::fixup_riscv_relax_j: case RISCV::fixup_riscv_relax_k: return true; default: return false; } } LLVM,RISCV,1, LLVM,RISCV,2,int getNumFixupKinds() { return RISCV::NumTargetFixupKinds; } LLVM,RISCV,3,"DataFragment applyFixup(Fixup fixup, DataFragment dataFragment, int value) { if (fixup.getOffset() < 0 || fixup.getOffset() > dataFragment.getSize()) { std::cout << ""Invalid fixup offset!""; return dataFragment; } FixupKind kind = fixup.getKind(); switch (kind) { case RISCV: dataFragment.updateInstructions(fixup.getOffset(), value); break; case RISCV7: dataFragment.updateInstructions(fixup.getOffset(), value); break; case RISCV8: dataFragment.updateInstructions(fixup.getOffset(), value); break; default: std::cout << ""Invalid fixup kind!""; break; } return dataFragment; }" LLVM,RISCV,4,MCObjectWriter * RISCVTargetMachine::createObjectWriter(raw_pwrite_stream &OS) const { } LLVM,RISCV,5,"bool fixupNeedsRelaxation(MCInst &Inst, const MCSubtargetInfo &STI, const MCAsmBackend &MAB) { const MCTargetOptions &Options = STI.getTargetOptions(); uint64_t RISCVTarget = Options.getRISCVTarget(); if (!Inst.getFlag(MCInst::FlgResolved)) { if (RISCVTarget != 0) { return true; } } return false; }" LLVM,RISCV,6, LLVM,RISCV,7, LLVM,RISCV,8, MCContext &Ctx) const { unsigned Opcode = Inst.getOpcode(); switch (Opcode) { case RISCV::ADDI: case RISCV::ADDI16SP: case RISCV::SLLI: case RISCV::SRLI: case RISCV::SRAI: if (Inst.getOperand(2).getImm() <= 0xfff || Inst.getOperand(2).getImm() >= -0x1000) { return false; } LLVM,RISCV,9, LLVM,RISCV,10,"void applyFixup(MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup, MCDataFragment &DF, const MCValue &Target, bool IsPCRel, MCFixupKind Kind){ uint64_t FixupOffset = Fixup.getOffset(); if(FixupOffset >= DF.getContents().size()){ report_fatal_error(""Invalid fixup offset!""); } unsigned Size = Kind.getTargetSize(); unsigned ValueSize = Target.getBitWidth(); if(ValueSize > Size){ report_fatal_error(""Invalid value size for fixup!""); }" LLVM,RISCV,11, 
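NOTE: several of the RISCV fixup records in this block sketch pieces of RISCVAsmBackend (shouldForceRelocation, fixupNeedsRelaxation, applyFixup). A hedged sketch of the force-relocation decision, assuming the MCAsmBackend hook and the fixup kinds from RISCVFixupKinds.h (the exact signature has changed across LLVM releases): GOT and TLS fixups can never be resolved at assembly time, and with linker relaxation (FeatureRelax) enabled the final offsets are only known at link time, so relocations must be kept.

#include "MCTargetDesc/RISCVAsmBackend.h"   // assumed in-tree header names
#include "MCTargetDesc/RISCVFixupKinds.h"
using namespace llvm;

bool RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                            const MCFixup &Fixup,
                                            const MCValue &Target) {
  switch (Fixup.getTargetKind()) {
  default:
    break;
  case RISCV::fixup_riscv_got_hi20:
  case RISCV::fixup_riscv_tls_got_hi20:
  case RISCV::fixup_riscv_tls_gd_hi20:
    // GOT/TLS accesses always go through a relocation.
    return true;
  }
  // Under linker relaxation the assembler cannot finalize pc-relative
  // offsets, so keep the relocation for the linker to resolve.
  return STI.getFeatureBits()[RISCV::FeatureRelax];
}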
LLVM,RISCV,12,bool mayNeedRelaxation(const MachineInstr *MI) { if (MI->getDesc().getOpcode() == RISCV::OPCODE) { if (MI->getDesc().mayNeedRelaxation(RISCV)) { return true; } } return false; } LLVM,RISCV,13, LLVM,RISCV,14, LLVM,RISCV,15," uint64_t &Value, bool IsPCRel) { const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind()); if (!IsPCRel && !Info.Flags) report_fatal_error(""Invalid fixup offset!""); uint64_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset(); uint64_t NumBits = 8 * Info.TargetSize; uint64_t Mask = (1 << NumBits) - 1; uint64_t FixupValue = Target.getConstant(); Value = (Value & ~(Mask << Offset)) | ((FixupValue & Mask) << Offset); if (NumBits > Info.TargetSize) { unsigned Shift = Offset + Info.TargetSize; Shift %= 8; Data[Offset + Info.TargetSize] |= FixupValue >> (NumBits - Info.TargetSize - Offset); Data[Offset + Info.TargetSize + 1] |= FixupValue >> (NumBits - Info.TargetSize) << (8 - Shift); } }" LLVM,RISCV,16,"bool fixupNeedsRelaxationAdvanced(const fixup_kind &fk, unsigned int value1, unsigned int value2) { if (fk == RISCV::fixup_riscv_rvc_branch || fk == RISCV::fixup_riscv_rvc_jump) { if (value1 == 254 || value1 == 256 || value2 == 2046 || value2 == 2048) { return true; } else { return false; } } else { return false; } }" LLVM,RISCV,17,RISCV::FixupKind getFixupKind(int expression) { if (expression == 1u || expression == -1u) { return RISCV; } else if (expression <= 65535) { return RISCV_RVC_IMM16; } else if (expression <= 4294967295) { return RISCV_32; } else if (expression <= 18446744073709551615) { return RISCV_64; } else { return BFD_RELOC_NONE; } } LLVM,RISCV,18, LLVM,RISCV,19, LLVM,RISCV,20,"bool shouldForceRelocation(RelocationEntry& RE, FeatureBitset& FB) { if ( RE.FixupKind == RISCV::fixup_riscv_got_hi20 || RE.FixupKind == RISCV::fixup_riscv_tls_got_hi20 || RE.FixupKind == RISCV::fixup_riscv_tls_gd_hi20 ) { if (FB[RISCV::FeatureRelax]) { return true; } } return false; }" LLVM,RISCV,21,"bool shouldInsertExtraNopBytesForCodeAlign(BasicBlock *BB, unsigned *Align, unsigned *MaxNops) { if (BB->getParent()->getTargetTriple().getArch() != Triple::riscv) { return false; } RISCVMachineFunctionInfo *MFI = BB->getParent()->getInfo(); for (MachineBasicBlock *Pred : BB->predecessors()) { if (MFI->getPredRelaxed(Pred)) { *Align = 4; *MaxNops = 2; return true; } } if (MFI->hasRISCVStandardExtC()) return false; *Align = 2; *MaxNops = 4; return true; }" LLVM,RISCV,22,"bool shouldInsertFixupForCodeAlign(MCInst *instruction, RISCV::AlignInfo targetInfo) { if (targetInfo.target == RISCV && targetInfo.feature == RISCV::FeatureRelax) { if (instruction->getOpcode() == RISCV::fixup_riscv_align) { return true; } } return false; }" LLVM,RISCV,23,"int writeNopData(int Count, OutputStream output) { if(!output) { return -1; } if(Count <= 0) { return -1; } int totalBytes = 0; int bytesWritten; while(totalBytes < Count) { bytesWritten = output.writeUInt32BE(0x00000013); if(bytesWritten < 0) { return -1; } bytesWritten = output.writeUInt32BE(0x00000000); if(bytesWritten < 0) { return -1; } if(RISCV::hasFeature(RISCV::FeatureStdExtC)) { bytesWritten = output.writeUInt32BE(0x00000000); if(bytesWritten < 0) { return -1; } } totalBytes += 4; if(totalBytes < Count) { bytesWritten = output.writeUInt32BE(0x00000001); if(bytesWritten < 0) { return -1; } totalBytes += 4; if(totalBytes < Count) { bytesWritten = output.writeUInt32BE(0x00000002); if(bytesWritten < 0) { return -1; } totalBytes += 4; } } } return totalBytes; }" LLVM,RISCV,24,"void writeNopData(const MCTargetOutputStream 
&OS, unsigned Count) { unsigned NumNopInstructions = (Count + 3) / 4; OS.EmitIntValue(RISCV, 4); OS.EmitIntValue(RISCV, 4); OS.EmitIntValue(RISCV::FeatureStdExtC, 4); OS.EmitIntValue(2, 4); OS.EmitIntValue(4, 4); OS.EmitIntValue(0, 4); OS.EmitIntValue(4, 4); OS.EmitIntValue(4, 4); OS.EmitIntValue(""\x13\0\0\0"", 4); OS.EmitIntValue(4, 4); OS.EmitIntValue(""\x01\0"", 2); std::string NOPs; for (unsigned i = 0; i < NumNopInstructions; i++) { NOPs += ""\x00\0\0\0""; } OS.EmitBinaryData(StringRef(NOPs), NumNopInstructions * 4); }" LLVM,RISCV,25,"bool shouldForceRelocation(const MCFixup &Fixup, MCContext &Ctx) { switch (Fixup.getKind()) { case RISCV::fixup_riscv_got_hi20: case RISCV::fixup_riscv_tls_got_hi20: case RISCV::fixup_riscv_tls_gd_hi20: case RISCV::fixup_riscv_pcrel_lo12_i: case RISCV::fixup_riscv_pcrel_lo12_s: case RISCV::fixup_riscv_pcrel_hi20: return Ctx.getObjectFileInfo()->getRelocMIPSABI() == RISCV::FeatureRelax; default: return false; } }" LLVM,RISCV,26," MCContext &Ctx) { if (!STI.hasFeature(RISCV::FeatureRelax) || !STI.hasFeature(RISCV::FeatureStdExtC)) return false; const unsigned MinAlignBytes = 2; unsigned AlignBytes = STI.getCodeAlignment(); if (AlignBytes > 1 && AlignBytes <= BytesToEmit) { if (Inst.getOpcode() == RISCV::ADDI && Inst.getOperand(0).getReg() == RISCV::X0 && Inst.getOperand(1).getReg() == RISCV::X0 && Inst.getOperand(2).getImm() == 0) { const MCSection *CurrentSection = Ctx.getCurrentSection().first; const MCSection *PrevSection = Ctx.getPreviousSection().first; if (CurrentSection == PrevSection) { if (Ctx.getCurrentFragment()->getContents().size() > BytesToEmit) return false; } if (AlignBytes % 4 != 0) { errs() << ""Cannot insert extra NOP bytes as alignment is not a multiple of 4\n""; return false; } unsigned ExtraBytes = AlignBytes - BytesToEmit; errs() << ""Inserting "" << ExtraBytes << "" NOP bytes for alignment\n""; for (unsigned i = 0; i < ExtraBytes/4; i++) Ctx.emitInstruction(MCInstBuilder(RISCV::ADDI) .addReg(RISCV::X0) .addReg(RISCV::X0) .addImm(0)); return true; } } return false; }" LLVM,RISCV,27, Align Alignment) const { const RISCVSubtarget &STI = MF.getSubtarget(); const RISCVTargetMachine &TM = STI.getTargetMachine(); if (!STI.hasFeature(RISCV::FeatureRelax)) { return false; } if (!STI.hasFeature(RISCV::FeatureFixupRISCVAlign)) { return false; } if (!TM.requireAlignedFixupForCodeAlign()) { return false; } unsigned MaxSupportedAlignment = STI.getMaxSupportedAlignment(); return Alignment > MaxSupportedAlignment; } LLVM,RISCV,28, LLVM,RISCV,29,int getNumFixupKinds() { return RISCV_FIXUPKINDS_COUNT; } LLVM,RISCV,30,"void RISCVAsmBackend::relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const { switch (Inst.getOpcode()) { case RISCV::ADD: case RISCV::SUB: Inst.setOpcode(RISCV::ADDW); break; case RISCV::LUI: Inst.setOpcode(RISCV::LUIW); break; default: break; } }" LLVM,RISCV,31,bool shouldForceRelocation(int target) { if (target == RISCV || target == RISCV::FeatureRelax) { return true; } return false; } LLVM,RISCV,32,"bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *DF) { if (DF->getArch() != Triple::riscv) return false; MCFixupKind Kind = (MCFixupKind)Fixup.Kind; switch (Kind) { case RISCV::fixup_riscv_rvc_branch: case RISCV::fixup_riscv_rvc_jump: case RISCV::fixup_riscv_rvc_cjump: break; default: return false; } uint64_t Min = -((1 << 10) - 1); uint64_t Max = (1 << 11) - 1; if (Value >= Min && Value <= Max) return false; if (Value % 2 == 0 && Value / 2 >= Min && Value / 2 <= Max) return 
false; return true; }" LLVM,RISCV,33, LLVM,RISCV,34,bool shouldForceRelocation(const Relocation &Rel) { uint64_t Type = Rel.Type; bool ForceRelocation = false; switch (Type) { case R_RISCV_HI20: case R_RISCV_PCREL_HI20: { if (Rel.Addend > 0x1fffff) ForceRelocation = true; break; } default: { ForceRelocation = true; break; } } return ForceRelocation; } LLVM,RISCV,35,"bool shouldForceRelocation(const RISCVTargetMachine &TM, unsigned Kind) { switch (Kind) { default: return false; case RISCV::fixup_riscv_got_hi20: case RISCV::fixup_riscv_pcrel_lo12_i: case RISCV::fixup_riscv_pcrel_lo12_s: return true; case RISCV::fixup_riscv_pcrel_hi20: return TM.getTargetFeatureBits() != RISCV::FeatureRelax; } }" LLVM,RISCV,36,"bool shouldForceRelocation(const MCAssembler &Asm, const MCValue &Target, unsigned int FixupKind) { if (Asm.getTargetTriple().isArch(""riscv"")) { if (FixupKind == RISCV::fixup_riscv_got_hi20 || FixupKind == RISCV::fixup_riscv_tls_got_hi20 || FixupKind == RISCV::fixup_riscv_tls_gd_hi20) { if (Target.getFeatureBits() & RISCV::FeatureRelax) { return true; } } } return false; }" LLVM,RISCV,37, LLVM,RISCV,38,"MCObjectWriter *createObjectWriter(const Triple &TT, raw_pwrite_stream &OS, bool IsLittleEndian) { if (!TT.isArch(""riscv"")) { return NULL; } MCObjectWriter *OW = new RISCVObjectWriter(OS, IsLittleEndian); return OW; }" LLVM,RISCV,39,"void applyFixup(MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef Data, SmallVectorImpl &Relocs) { assert(Fixup.getKind() == RISCV && ""Unexpected fixup kind!""); uint64_t Offset = Fixup.getOffset(); const MCFixupKindInfo &Info = Asm.getFixupKindInfo(Fixup.getKind()); assert(Offset + Info.TargetSize <= Data.size() && ""Invalid fixup offset!""); uint64_t Value = Target.evalAsAbsolute().getZExtValue(); if (Info.Flags & MCFixupKindInfo::FKF_IsPCRel) { Value -= Layout.getSymbolOffset(Fixup.getValue()); } uint64_t FixupOffset = Offset + Info.FixupOffset; uint64_t Mask = ((1ULL << Info.TargetSize) - 1) << FixupOffset; uint64_t OldValue = *((uint64_t *)&Data[Offset]); uint64_t NewValue = (OldValue & ~Mask) | ((Value << FixupOffset) & Mask); *((uint64_t *)&Data[Offset]) = NewValue; if (Info.Flags & MCFixupKindInfo::FKF_IsTarget) { Relocs.push_back(MCFixup::Create(Offset, Fixup.getValue(), MCFixup::getKindForSize(Info.TargetSize), Fixup.getOffset())); } }" LLVM,RISCV,40,"void applyFixup(const Fixup &fixup, DataFragment &data_fragment, uint32_t offset) { if (fixup.getTargetKind() != RISCV_TARGET_KIND) { errs() << ""Fixup is not supported for RISCV target.\n""; return; } if (fixup.getKind() == RISCV_FIXUP_KIND) { uint32_t value = fixup.getValue(); memcpy(data_fragment.data() + offset, &value, sizeof(value)); } if(offset >= data_fragment.size()) { errs() << ""Invalid fixup offset!""; return; } return data_fragment; }" LLVM,RISCV,41, LLVM,RISCV,42,"int getSize(int target_specific_value, int size, int jump_opcode, int branch_opcode, int target_specific_size) { 2. Check if the target specific value is equal to RISCV, and if yes, continue with the calculation: if(target_specific_value == RISCV) { 3. Check if the instruction is a jump or branch instruction, as they have variable encoding sizes: if(jump_opcode || branch_opcode) { 4. Calculate the size of the instruction encoding by adding the size of the opcode and target specific value: int encoding_size = size + target_specific_size; 5. Return the calculated encoding size: return encoding_size; } 6. 
If the instruction is not a jump or branch instruction, return the size of the instruction encoding: else { return size; } 7. If the target specific value is not equal to RISCV, return zero as the size cannot be determined: else { return 0; }" LLVM,RISCV,43,"unsigned getFixupKind(unsigned RISCV, unsigned RISCV, unsigned int1, unsigned int1) { switch (RISCV) { case RISCV: switch (RISCV) { case RISCV: return LLVM::FK_Data_1; default: return LLVM::FK_Data_2; } default: return 0; } }" LLVM,RISCV,44, LLVM,RISCV,45, LLVM,RISCV,46,"bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *DF, const MCAsmLayout &Layout) { } 3. Inside the function, retrieve the target specific values from the RISCV backend using the ""DF"" parameter: uint64_t RvcBranchFixup = DF->getFixupKindInfo(RISCV::fixup_riscv_rvc_branch).TargetOffset; uint64_t RvcJumpFixup = DF->getFixupKindInfo(RISCV::fixup_riscv_rvc_jump).TargetOffset; 4. Check if the fixup type is ""Resolved"" and if it is, return false as there is no need for relaxation: if (Fixup.getKind() == RISCV::fixup_riscv_rvc_branch && Value == 254) { return false; } if (Fixup.getKind() == RISCV::fixup_riscv_rvc_jump && Value == 2046) { return false; } 5. If the fixup type is not resolved, check if it matches the target specific values for RvcBranchFixup and RvcJumpFixup and if it does, return true as relaxation is required: if (Fixup.getKind() == RISCV::fixup_riscv_rvc_branch && Value == RvcBranchFixup) { return true; } if (Fixup.getKind() == RISCV::fixup_riscv_rvc_jump && Value == RvcJumpFixup) { return true; }" LLVM,RISCV,47,"void writeNopData(raw_ostream &OS, uint64_t Count) { if (Count % 4 != 0) { errs() << ""Count must be a multiple of 4 bytes\n""; return; } for (int i = 0; i < Count; i += 4) { MCInst Inst; Inst.setOpcode(RISCV::NOP); Inst.addOperand(MCOperand::createReg(RISCV::X0)); MCSubtargetInfo STI(""riscv-unknown-elf""); STI.setDefaultFeatures(""riscv,riscv,riscv::FeatureStdExtC""); std::unique_ptr Emitter ( GET_RISCV_MC()->createMCCodeEmitter(*STI.getTargetTriple(), *STI.getFeatureBits()) ); raw_hex_ostream RawStream(OS); Emitter->encodeInstruction(Inst, RawStream, SmallVectorImpl()); MCAsmLayout Layout; std::unique_ptr Context(new MCContext(STI, Layout)); Layout.getAssemblerLayout().setSectionAddress(&Layout, 0x0); Layout.getAssemblerLayout().setDefaultSection("".text"", 0); RawStream.SetBuffer(); RawStream << ""0x"" << format(""%08"" PRIx64, Layout.getSymbolOffset(*Context.get().getDummyFragment())); } }" LLVM,RISCV,48,"void applyFixup(const MCValue &Value, MutableArrayRef Data, uint64_t Offset, const MCExpr *Fixup, const MCFixupKind &Kind, const MCAsmBackend &MAB) { const MCFixupKindInfo &FixupInfo = MAB.getFixupKindInfo(Kind); uint64_t FixupValue = Value.getConstant(); Data[Offset + FixupInfo.TargetOffset] |= (FixupValue >> FixupInfo.TargetOffset); }" LLVM,RISCV,49,"bool shouldInsertExtraNopBytesForCodeAlign(const MCSubtargetInfo &STI, uint64_t CodeAlign) { const FeatureBitset &Features = STI.getFeatureBits(); if (STI.getTargetTriple().getArch() != Triple::riscv || !Features.test(RISCV::FeatureRelax)) { return false; } if (Features.test(RISCV::FeatureStdExtC)) { return false; } if (CodeAlign != 2 && CodeAlign != 4) { return false; } return true; }" LLVM,RISCV,50, LLVM,RISCV,51, LLVM,RISCV,52,bool shouldForceRelocation(RelocationRef& Relocation) { if (Relocation.getType() == RISCV::fixup_riscv_got_hi20 || Relocation.getType() == RISCV::fixup_riscv_tls_got_hi20 || Relocation.getType() == RISCV::fixup_riscv_tls_gd_hi20 
|| Relocation.getType() == RISCV::fixup_riscv_pcrel_lo12_i || Relocation.getType() == RISCV::fixup_riscv_pcrel_lo12_s || Relocation.getType() == RISCV::fixup_riscv_pcrel_hi20) { if (RISCV::FeatureRelax.isEnabled()) return true; else return false; } return false; } LLVM,RISCV,53,"void applyFixup(MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef Data, uint64_t Value) { uint64_t Offset = Fixup.getOffset(); uint64_t FixupValue = Fixup.getValue(); switch (Fixup.getKind()) { case RISCV::fixup_riscv_hi20: FixupValue <<= 12; case RISCV::fixup_riscv_lo12_i: Value += FixupValue; uint64_t Mask = (1 << 12) - 1; if (Fixup.getKind() == RISCV::fixup_riscv_hi20) { Mask <<= 12; }" LLVM,RISCV,54,"void applyFixup(MCDataFragment &DF, MCValue &FixupVal, MCValue::TargetSpecificFlags TSFlags, uint64_t Offset, MCContext *Ctx){ const MCSymbolRefExpr *A = FixupVal.getSymA(); uint64_t SymAddress = 0; if (A) { if (const MCSymbol *Sym = A->getSymbol()) { SymAddress = Sym->getAbsoluteAddr(); } else { return; } }" LLVM,RISCV,55, LLVM,RISCV,56," bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *DF, const MCAsmLayout &Layout, MCInst &Instr, uint64_t Address) const { switch (Fixup.getTargetKind()) { case RISCV::fixup_riscv_rvc_branch: int64_t Min = -256; int64_t Max = 255; if (Value < Min || Value > Max) { Instr.setOpcode(RISCV::C_LUI); return true; } return false; case RISCV::fixup_riscv_rvc_jump: Min = -2048; Max = 2047; if (Value < Min || Value > Max) { Instr.setOpcode(RISCV::C_LUI); return true; } return false; default: return false; } }" LLVM,RISCV,57,bool mayNeedRelaxation(Instruction* instr) { if (instr->getDesc().TSFlags == RISCV) { if (instr->hasUnsupportedFeatures()) { return true; } if (instr->hasOutOfRangeImmediateOperands()) { return true; } return false; } return false; } LLVM,RISCV,58, LLVM,RISCV,59,"bool shouldForceRelocation(const Relocation& Reloc, const TargetMachine& TM) { switch (Reloc.Kind) { case RISCV::fixup_riscv_pcrel_lo12_i: case RISCV::fixup_riscv_pcrel_lo12_s: return true; case RISCV::fixup_riscv_pcrel_hi20: if (TM.getSubtarget().hasFeature(RISCV::FeatureRelax)) return true; else return false; default: errs() << ""Unexpected fixup kind for pcrel_lo12\n""; return false; } }" LLVM,RISCV,60,"bool writeNopData(int count, raw_ostream &output) { if (!output) { return false; }" LLVM,RISCV,61,"MCOperand addExpr(MCContext *Ctx) { const MCExpr *Expr = RVVExpr::create(Ctx, 0, 0); MCOperand Op = MCOperand::createExpr(Expr); return Op; }" LLVM,RISCV,62,"define i32 @getReg() { entry: ; Load the value of the ""target specific value"" from memory %target_specific_value = load i32, i32* @NotARegister ret i32 %target_specific_value }" LLVM,RISCV,63,bool isImm(const Value *V) { if (isa(V)) { return true; } else if (isa(V) || isa(V)) { return false; } if (V->getType()->isRISCVSpecific()) { return true; } for (User *U : V->users()) { if (Instruction *I = dyn_cast(U)) { if (I->isRISCVSpecific() && I->getNumOperands() > 0 && I->getOperand(0) == V) { return true; } } } return false; } LLVM,RISCV,64,bool isMem(Operand *Op) { if (RISCVOperand *RO = dyn_cast(Op)) { if (RO->isLoadStoreOp()) { return true; } } if (LoadInst *LI = dyn_cast(Op)) { return true; } else if (StoreInst *SI = dyn_cast(Op)) { return true; } return false; } LLVM,RISCV,65,"bool isReg(unsigned RegID) { List validRegIDs = [0, 1, 2, ..., 30, 31]; if (validRegIDs.contains(RegID)) { return true; } else { return false; } }" LLVM,RISCV,66,bool isToken(LLVMValueRef Op) { bool 
isRISCVtoken = false; if(LLVMIsAConstant(Op)) { if(LLVMIsAConstantInt(Op)) { isRISCVtoken = true; } else if(LLVMIsAConstantFP(Op)) { isRISCVtoken = true; } else if(LLVMIsAConstantAggregateZero(Op)) { isRISCVtoken = true; } } return isRISCVtoken; } LLVM,RISCV,67,"void MatchAndEmitInstruction(const char* RISCV, uint64_t RISCV, uint64_t op0, const char* missingFeature, const char* instructionRequires, int numOperands, int numOperandsRequired, int offset, int size, const char* description, int mode, uint64_t operand, const char* errorMsg, const char* invalidOperand, const char* invalidInstruction) { MCInst inst; unsigned int numMcOperands = 0; for (unsigned int i = 0; i < numOperands; i++) { MCOperand mcOp; switch (mode) { case RISCV: mcOp = ParseRISCVOperand(operand); break; default: llvm_unreachable(""Unsupported target specific mode!""); } inst.addOperand(mcOp); numMcOperands++; } if (numMcOperands < numOperandsRequired) { RISCVEncodingError(errorMsg); return; } emitToStreamer(RISCV, inst, mode); if (missingFeature) { RISCVEncodingError(description); return; } if (instructionRequires) { RISCVEncodingError(description); return; } if (invalidOperand) { RISCVEncodingError(invalidOperand); return; }" LLVM,RISCV,68, LLVM,RISCV,69,"llvm::Instruction* ParseInstruction(const std::string& instruction_str) { auto tokens = tokenize_instruction(instruction_str); if (tokens[0] != ""RISCV"") { return nullptr; } llvm::Instruction* instruction = new RISCVInstruction(tokens[1]); for (size_t i = 2; i < tokens.size(); ++i) { if (tokens[i] == ""RISCV"") { auto reg = parse_register(tokens[i + 1]); if (reg == -1) { std::cerr << ""Unexpected token in register argument\n""; return nullptr; } instruction->add_operand(reg); ++i; } else if (tokens[i] == ""0"") { auto imm = parse_immediate(tokens[i + 1]); if (imm == -1) { std::cerr << ""Unexpected token in immediate argument\n""; return nullptr; } instruction->add_operand(imm); ++i; } else { std::cerr << ""Unexpected token in argument list: "" << tokens[i] << ""\n""; return nullptr; } } return instruction; }" LLVM,RISCV,70,"void print(Operand *op, ostream &out) { if (op->isRISCV()) { RISCVValue val = op->getRISCVValue(); out << ""RISCV: "" << val << ""\n""; } else { out << ""Not implemented\n""; } }" LLVM,RISCV,71,"MCOperand addExpr(MCExpr *Expr, Triple TargetTriple) { assert(Expr != NULL && ""Expr shouldn't be null!""); MCOperand Operand(MCExpr::Target, 0, Triple::normalize(TargetTriple), Triple::normalize(TargetTriple), Triple::normalize(TargetTriple)); Operand.setExpr(Expr); return Operand; }" LLVM,RISCV,72,InstructionOperand.getEndLoc(RISCV) { if(RISCV == true){ int last_token_loc = this->getEndTokenLocation(); return last_token_loc; } else{ return -1; } } LLVM,RISCV,73,llvm::DebugLoc getLoc( RS) { llvm::DebugLoc Dummy; return Dummy; } LLVM,RISCV,74,"int getReg(Edge &E) { if (E.getTarget().getTargetTriple().getArch() == llvm::Triple::riscv) { return E.getReg(); } llvm_unreachable(""Invalid type access! 
Target is not RISCV""); }" LLVM,RISCV,75,Location getStartLoc(Operand operand) { if (operand.isRegister()) { Register reg = operand.getRegister(); return reg.getLocation(); } else if (operand.isImm()) { Immediate imm = operand.getImm(); return imm.getLocation(); } else if (operand.isGlobal()) { GlobalData global = operand.getGlobal(); return global.getLocation(); } else if (operand.isExpr()) { Expression expr = operand.getExpr(); return expr.getLocation(); } else if (operand.isMemory()) { Memory mem = operand.getMemory(); return mem.getLocation(); } else { return null; } } LLVM,RISCV,76,bool isImm(Value *op) { unsigned opcode = op->getOpcode(); if (opcode == Instruction::Add || opcode == Instruction::LShr || opcode == Instruction::AShr || opcode == Instruction::And || opcode == Instruction::Or || opcode == Instruction::Xor || opcode == Instruction::Sub || opcode == Instruction::Mul || opcode == Instruction::UDiv || opcode == Instruction::URem || opcode == Instruction::FAdd || opcode == Instruction::FSub || opcode == Instruction::FMul || opcode == Instruction::FDiv || opcode == Instruction::FRem || opcode == Instruction::ICmp || opcode == Instruction::FCmp || opcode == Instruction::Call) { return false; } if (isa(op)) { return false; } return true; } LLVM,RISCV,77,bool isMem(const Operand *Op) { switch (Op->getType()) { case OpTypeLoad: case OpTypeStore: return true; case OpTypeLoadHalf: case OpTypeLoadByte: case OpTypeStoreHalf: case OpTypeStoreByte: return true; case OpTypeLoadFP: case OpTypeLoadFP16: case OpTypeStoreFP: case OpTypeStoreFP16: return true; default: return false; } } LLVM,RISCV,78,bool isReg(Operand op) { if (op.targetSpecificValue == RISCV) { return true; } else { return false; } } LLVM,RISCV,79,bool isToken(MachineOperand &MO) { bool isToken = false; if (MO.isReg()) { switch (MO.getReg()) { case RISCV::XTP: case RISCV::XP: case RISCV::XAP: case RISCV::XFP: case RISCV::XTMP: isToken = true; break; default: isToken = false; } } return isToken; } LLVM,RISCV,80,"const char *TargetRISCVDirectives[] = { ""RISCV"", ""RISCV"", "".option"", "".attribute"", "".insn"" }; void ParseDirective(llvm::MCStreamer &ST, llvm::StringRef Value, llvm::SourceMgr &SM) { bool hasDirective = false; for (int i=0; i<5; i++) { if (Value == TargetRISCVDirectives[i]) { hasDirective = true; break; } } if (!hasDirective) { SM.PrintMessage(SMLoc(), llvm::SourceMgr::DK_Error, ""invalid target specific directive for RISCV backend""); return; } parseDirective(ST); SM.PrintMessage(SMLoc(), llvm::SourceMgr::DK_Warning, ""ParseDirective is deprecated, use 'parseDirective' instead""); }" LLVM,RISCV,81,"Instruction ParseInstruction(std::string assemblyInstruction) { if (assemblyInstruction.find(""unexpected token"") != std::string::npos) { return ErrorInstruction(""Invalid instruction""); } else { if (RISCV::isSupportedInstruction(assemblyInstruction) && RISCV::FeatureRelax.isRequired(assemblyInstruction)) { Instruction instr = RISCV::parseInstruction(assemblyInstruction); return instr; } else { return ErrorInstruction(""Unsupported instruction""); } } }" LLVM,RISCV,82,"void print(Operand *op, Stream *S) { if (op->isRISCV()) { RISCVOperand *riscvOp = dyn_cast(op); if (riscvOp->isRegister()) { StringRef regName = riscvOp->getRegister(); *S << "" %"" << regName.str(); } else if (riscvOp->isNoRegister()) { *S << ""noreg""; } else if (riscvOp->isTargetSpecific()) { *S << RISCV; } else if (riscvOp->isSysRegister()) { StringRef sysRegName = riscvOp->getSysRegName(); *S << """"; } else if (riscvOp->isValueType()) { 
ValueType *valType = riscvOp->getValueType(); *S << ""getName() << ""\"">""; } else if (riscvOp->isConstant()) { Constant *c = riscvOp->getConstValue(); *S << c->getValueAsString(); } else { riscvOp->print(S); } } else { op->print(S); } }" LLVM,RISCV,83,"Register tryParseRegister(StringRef Register, const MachineRegisterInfo &MRI) { if (!Register.startswith(""RISCV,"")) { return nullptr; } if (Register.size() != 7 || !isdigit(Register[5]) || !isdigit(Register[6])) { return nullptr; } unsigned RegNum = Register.slice(6).getAsInteger(10); if (!MRI->getRegClass(RISCV::GPRRegClassID).contains(RegNum)) { return nullptr; } return Register; }" LLVM,RISCV,84,"bool validateTargetOperandClass(const MachineInstr *MI, unsigned OpIdx, unsigned OpClassID) { If (OpClassID == RISCV::FPR64RegClassID || OpClassID == RISCV::FPR64CRegClassID || OpClassID == RISCV::VRRegClassID || OpClassID == 0) { return true; } return false; }" LLVM,RISCV,85,"void ParseDirective(std::string Directive, RISCVTargetStreamer& Streamer) { if(Directive == "".option""){ Streamer.EmitRISCVOption(); } else if(Directive == "".attribute""){ Streamer.EmitRISCVAttribute(); } }" LLVM,RISCV,86, LLVM,RISCV,87,"void addExpr(RISCV &Expr) { assert(Expr != NULL && ""Expr shouldn't be null!""); InsertTargetSpecificValue(RISCV, Expr); }" LLVM,RISCV,88, LLVM,RISCV,89,"void print(const RISCVOperand& Operand, raw_ostream &Stream) { if (Operand.isReg()) { unsigned RegNum = Operand.getReg(); if (RegNum >= RISCV::X0 && RegNum <= RISCV::X31) { Stream << """"; } } if (Operand.isImm()) { int64_t ImmVal = Operand.getImm(); Stream << ImmVal; } Stream << ""''""; }" LLVM,RISCV,90," MachineBasicBlock::iterator MBBI) { RISCV::Opcode opc = MI->getOpcode(); if (RISCV::isPseudo(opc)) { switch (opc) { case RISCV::PseudoLI: { int64_t imm = MI->getOperand(1).getImm(); BuildMI(MBB, MBBI, MI->getDebugLoc(), TII->get(RISCV::LW)) .addReg(RISCV::X1) .addImm(0) .addReg(0); BuildMI(MBB, MBBI, MI->getDebugLoc(), TII->get(RISCV::ADDI)) .addReg(MI->getOperand(0).getReg()) .addReg(RISCV::X1) .addImm(imm) .addReg(0); MBB.erase(MBBI); break; }" LLVM,RISCV,91,"void print(Operand op, raw_ostream &OS) { if (op.isReg()) { unsigned int regNum = op.getReg(); OS << """"; } if (op.isTarget()) { int tsVal = op.getTargetFlagVal(); OS << ""'"" << tsVal << ""'""; if (op.isSystemRegister()) { StringRef sysReg = op.getSysReg(); OS << "" ""; } } OS.flush(); }" LLVM,RISCV,92,"bool tryParseRegister(MCInst &MI, const unsigned *RegNum) { if (MI.getNumOperands() == 0) { return false; } const MachineOperand &MO = MI.getOperand(0); if (MO.isReg()) { *RegNum = MO.getReg(); return true; } return false; }" LLVM,RISCV,93, LLVM,RISCV,94,Instr ParseInstruction(Instr Instruction) { } LLVM,RISCV,95, LLVM,RISCV,96,"void MatchAndEmitInstruction(MCStreamer &Out, MCOperand &MCKind, StringRef Mnemonic, ArrayRef Operands, SMLoc IDLoc, SMLoc EndLoc, const MCSubtargetInfo &STI) { if (Mnemonic.find(""RISCV"") != std::string::npos) { MCInst Inst; Inst.setOpcode(MCKind.getImm()); if (Inst.getOpcode() == 0) { Out.EmitInstruction(Inst, STI); return; } if (Operands.size() < 2) { errs() << ""Too few operands for instruction!""; return; } for (int i = 1; i < Operands.size(); i++) { if (Mnemonic[i] == 'i' || Mnemonic[i] == 'o' || Mnemonic[i] == 'r' || Mnemonic[i] == 'w') { if (!MCOperandIsReg(Operands[i]) && !MCOperandIsImm(Operands[i]) && !MCOperandIsMem(Operands[i])) errs() << ""Invalid operand for instruction!""; return; } } if (MCOperandIsImm(Operands[1])) { int ImmValue = Operands[1].getImm(); if (ImmValue % 2 != 0 || 
ImmValue < 0 || ImmValue > 65535) { errs() << ""Immediate must be a multiple of 2 bytes in the range!""; return; } } Out.EmitInstruction(Inst, STI); return; } errs() << ""Unrecognized instruction mnemonic!""; return; }" LLVM,RISCV,97,"Function ParseInstruction (instruction: string) { switch (instruction) { case ""RISCV"": return RISCV-specific value; case ""RISCV"": return RISCV-specific value; case ""RISCV"": return RISCV-specific value; default: return ""unexpected token""; } }" LLVM,RISCV,98, LLVM,RISCV,99, LLVM,RISCV,100,"void processInstruction(RISCVInst &I) { if (RISCV::isPseudoLI(I.getOpcode())) { I.addDebugInfoAnchor(RISCV::PseudoLI, 0, 1, 32); } else { } }" LLVM,RISCV,101," llvm::Register Reg, llvm::StringRef SysReg, unsigned int Size) { OS << RISCV << """"; OS << "" = ""; OS << ""'""; OS << SysReg << ""'""; OS << "":""; OS << 32; OS << """"; OS << ""\n""; }" LLVM,RISCV,102,"bool validateTargetOperandClass(MCInst &MI, unsigned OperandNum, unsigned *NewClass) { if (NewClass == nullptr) { return false; } if (MI.getNumOperands() < 2) { return false; } const MachineOperand &MO = MI.getOperand(OperandNum); if (MO.isReg()) { const TargetRegisterClass *RC = MRI.getRegClass(MO.getReg()); if (RC == &RISCV::GPR64RegClass || RC == &RISCV::FPR64RegClass || RC == &RISCV::FPR64CRegClass) { *NewClass = RC->getID(); return true; } } return false; }" LLVM,RISCV,103,"void print(raw_ostream &OS, MCOperand &MCOp) { if (MCOp.isRISCVMCOperand()) { OS << ""RISCV""; if (MCOp.hasReg()) { OS << """"; } if (MCOp.hasSysReg()) { OS << """"; } if (MCOp.hasSpecialCapReg()) { OS << """"; } if (MCOp.hasVType()) { OS << """"; } } }" LLVM,RISCV,104, LLVM,RISCV,105,"void print(operand op, ostream& stream) { if (op->getType() != RISCV) { cerr << ""Error: Operand is not of RISCV type."" << endl; return; } stream << ""RISCV,""; stream << ""getRegister() << "">""; stream << "">""; stream << ""'""; stream << op->getSysRegOperand() << ""'""; stream << ""'""; stream << ""isVecReg()) { stream << op->getVecRegOperand(); stream << "">""; stream << ""getVecType() << "">""; } }" LLVM,RISCV,106,"bool validateTargetOperandClass(MVT::SimpleValueType VT, const TargetRegisterClass *RC) { if (VT != MVT::i32 && VT != MVT::f32) return false; if (RC->getID() != RISCV::GPRRegClassID && RC->getID() != RISCV::FPR32RegClassID && RC->getID() != RISCV::FPR32CRegClassID) return false; if (!RC->isSubsetOf(RISCV::GPRRegClassID) && !RC->isSubsetOf(RISCV::FPR32RegClassID) && !RC->isSubsetOf(RISCV::FPR32CRegClassID)) return false; if (RC->getName() != ""RISCV"" && RC->getName() != ""RISCV"" && RC->getName() != ""RISCV"" && RC->getName() != ""RISCV"" && RC->getName() != ""RISCV"" && RC->getName() != ""RISCV::FPR32RegClassID"" && RC->getName() != ""RISCV"" && RC->getName() != ""RISCV::FPR32CRegClassID"") return false; return true; }" LLVM,RISCV,107,"void addExpr(MCInst &Inst, const MCExpr *Expr, MCContext &Ctx, uint64_t TSFlags, uint64_t RISCV) { if (Expr->isNullValue()) { llvm_unreachable(""Expr shouldn't be null!""); } MCOperand NewOperand = MCOperand::createExpr(Expr); NewOperand.setTargetFlags(TSFlags); NewOperand.setTargetCustomFlags(RISCV); Inst.addOperand(NewOperand); }" LLVM,RISCV,108,"if (MO.getTargetFlags() == RISCV) { int reg = MO.getReg(); return reg; } else { report_fatal_error(""Invalid type access!""); }" LLVM,RISCV,109,bool isImm(Operand operand) { if (operand.getValueType() == IMM_INT32) return true; else return false; } LLVM,RISCV,110,"define i1 @isReg(i8* %operand) { %is_register = icmp ne i8* %operand, null ret i1 %is_register }" 
LLVM,RISCV,111,bool isToken(const MachineOperand &MO) { return MO.isToken(); } LLVM,RISCV,112,"string ParseDirective(string directive) { if (directive == ""option"") { return parseDirective(directive); } if (directive != ""RISCV,RISCV,\"".option\"""") { return ""Error: Unknown directive for RISCV target!""; } return assemblyCode; }" LLVM,RISCV,113,"void ParseInstruction(Instruction &I, const AsmToken &Tok, RISCV::FeatureRelaxMode Mode, unsigned FeatureBits) { switch(Tok.getKind()) { case AsmToken::At: break; case AsmToken::Colon: break; case AsmToken::Comma: break; case AsmToken::Equal: break; case AsmToken::Identifier: I.setOpcode(RISCV::getInstructionCode(Tok.getString())); break; case AsmToken::Integer: uint64_t Val; if(Tok.getStringRef().getAsInteger(0, Val)) { report_fatal_error(""unexpected token""); } else { I.addOperand(MCOperand::createImm(Val)); } break; case AsmToken::EndOfStatement: break; case AsmToken::Minus: I.addOperand(MCOperand::createImm(-1)); break; case AsmToken::Plus: break; case AsmToken::String: unsigned RegNum = 0; if(Tok.getStringRef().substr(0,2) == ""r["") { if(!Tok.getStringRef().substr(2,1).getAsInteger(0, RegNum) && RegNum < 32) { I.addOperand(MCOperand::createReg(RegNum)); } } break; default: report_fatal_error(""unexpected token""); } }" LLVM,RISCV,114,"void print(raw_ostream &OS, const MachineOperand &MO) const { switch (MO.getType()) { case MachineOperand::MO_Register: OS << """"; break; case MachineOperand::MO_Immediate: OS << MO.getImm(); break; case MachineOperand::MO_MachineBasicBlock: OS << ""getNumber() << "">""; break; case MachineOperand::MO_ExternalSymbol: OS << """"; break; case MachineOperand::MO_GlobalAddress: OS << ""getName() << "">""; break; case MachineOperand::MO_RegisterMask: OS << ""getMaskNumber() << "">""; break; default: llvm_unreachable(""Unknown machine operand type!""); } OS << """"; }" LLVM,RISCV,115, LLVM,RISCV,116,bool validateTargetOperandClass(MCOperandClassID ClassID) { switch (ClassID) { case RISCV::RegisterClassID: return true; case RISCV::CTRLRegClassID: return true; case RISCV::PCCRegClassID: return true; case RISCV::GPRRegClassID: return true; case RISCV::FPRRegClassID: return true; case RISCV::FPR32RegClassID: return true; case RISCV::FPR32CRegClassID: return true; default: return false; } } LLVM,RISCV,117,"void EmitInstruction(MachineInstr *MI, MachineBasicBlock *MBB, MachineFunction *MF, raw_ostream &OS) { unsigned RiscvOpcode = MI->getOpcode(); bool HazardDetected = CheckHazard(RiscvOpcode); if (HazardDetected) { EmitNop(MF, OS); } MI->print(OS); UpdateHazardState(RiscvOpcode); return; }" LLVM,RISCV,118,"void PrintAsmMemoryOperand(MachineInstr *MI, const char *AsmVariant) { LLVM_DEBUG(dbgs() << ""Printing operand of INLINEASM instruction:\n""); unsigned OpIdx = MI->getOperand(0).getImm(); if (MI->getOpcode() != TargetOpcode::INLINEASM) { LLVM_DEBUG(dbgs() << ""Not an INLINEASM instruction!\n""); return; } const MachineOperand &MO = MI->getOperand(OpIdx); int RISCV = MO.getMetadata(""RISCV"")->getValue(); if (RISCV != 1) { LLVM_DEBUG(dbgs() << ""Incorrect target-specific value for RISCV!\n""); return; } const MachineOperand &MemOperand = MI->getOperand(OpIdx+1); LLVM_DEBUG(dbgs() << ""AsmVariant: "" << AsmVariant << ""\n""); LLVM_DEBUG(dbgs() << ""Operand: "" << MemOperand.getAsString() << ""\n""); }" LLVM,RISCV,119,"void PrintAsmOperand(const MCInst *MI, unsigned OpNum, const MCSubtargetInfo &STI, raw_ostream &OS) { OpNum = (OpNum - 1) * 2 + RISCV::OpShift; if (MI->getOperand(OpNum).isReg()) { unsigned Reg = 
MI->getOperand(OpNum).getReg(); OS << '%' << RISCVInstPrinter::getRegisterName(Reg); } else { unsigned Imm = MI->getOperand(OpNum).getImm(); if (STI.getFeatureBits()[RISCV::FeatureB]) { OS << ""0x"" << format_hex(static_cast(Imm), 8); } else { OS << Imm; } } }" LLVM,RISCV,120,void RISCVTargetMachine::runOnMachineFunction(MachineFunction &MF){ } LLVM,RISCV,121,"void EmitInstruction(const TargetInstrInfo &TII, MachineInstr *MI, MachineBasicBlock &MBB, MachineFunction &MF, unsigned Subtarget) { switch (MI->getOpcode()) { case RISCV::ADD: { const MachineOperand &Op0 = MI->getOperand(0); const MachineOperand &Op1 = MI->getOperand(1); const MachineOperand &Op2 = MI->getOperand(2); unsigned DestRegID = Op0.getReg(); unsigned SrcRegID1 = Op1.getReg(); unsigned SrcRegID2 = Op2.getReg(); TII.emitAddInstruction(RISCV, RISCV, RISCV, DestRegID, SrcRegID1, SrcRegID2); RISCV.advanceHazardState(DestRegID, SrcRegID1, SrcRegID2, Subtarget); break; } case RISCV::SUB: { const MachineOperand &Op0 = MI->getOperand(0); const MachineOperand &Op1 = MI->getOperand(1); const MachineOperand &Op2 = MI->getOperand(2); unsigned DestRegID = Op0.getReg(); unsigned SrcRegID1 = Op1.getReg(); unsigned SrcRegID2 = Op2.getReg(); TII.emitSubInstruction(RISCV, RISCV, RISCV, DestRegID, SrcRegID1, SrcRegID2); RISCV.advanceHazardState(DestRegID, SrcRegID1, SrcRegID2, Subtarget); break; } default: TII.emitInstruction(MI, MBB); RISCV.advanceHazardState(Subtarget); break; } }" LLVM,RISCV,122,"function emitInstruction(opcode, operand1, operand2) { if (target == ""RISCV"") { emit(opcode, operand1, operand2, ""RISCV""); emit(opcode, operand1, operand2, ""RISCV""); emit(opcode, operand1, operand2, ""RISCV""); } else { throw Error(""Target not supported.""); } }" LLVM,RISCV,123," const char *AddressFn) { if (MI->getOpcode() != TargetOpcode::INLINEASM) return; MachineOperand MO = MI->getOperand(OpNo); if (!MO.isMem()) return; MachineOperand AddrMO = MO.getMemOperand()->getValue(); const MCExpr *AddrExpr = nullptr; if (AddrMO.isGlobal() || AddrMO.isCPI()) { const GlobalValue *GV = AddrMO.getGlobal(); AddrExpr = AP.getSymbol(AddrMO, GV); } else { AddrExpr = AddrMO.getExpr(); } const MCExpr *OffsetExp = MCConstantExpr::create(Offset, AP.OutContext); const MCExpr *ModifierExp = MCSymbolRefExpr::create(Modifier, AP.OutContext); AddrExpr = MCBinaryExpr::createAdd(AddrExpr, OffsetExp, AP.OutContext); AddrExpr = MCBinaryExpr::createAdd(AddrExpr, ModifierExp, AP.OutContext); std::string TargetSpecificValue = RISCV::RISCV; std::string AddrFn = AddressFn; AP.EmitToStreamer(AddrExpr, AP.getMAI(), 0, false, false, AddrFn, TargetSpecificValue); if (ExtraCode) AP.OutStreamer->EmitBytes(ExtraCode); }" LLVM,RISCV,124,"void RISCVTargetMachine::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &O) { assert(MI && ""Invalid MachineInstr pointer""); const MachineOperand &MO = MI->getOperand(OpNo); const unsigned Opcode = MI->getOpcode(); switch (Opcode) { case RISCV::RISCV: O << getRISCVTargetLowering()->getAsmOperandString(MO, Opcode); break; default: llvm_unreachable(""Unhandled opcode on PrintAsmOperand""); } }" LLVM,RISCV,125," raw_ostream &O) { assert(MI && ""Invalid machine instruction""); assert(OpNo < MI->getNumOperands() && ""Operand number out of range""); const MachineOperand &MO = MI->getOperand(OpNo); if (MO.isImm()) { O << MO.getImm(); } else if (MO.isReg()) { O << RISCVInstPrinter::getRegisterName(MO.getReg()); } else if (MO.isFPImm()) { O << MO.getFPImm(); } else if (MO.isSymbol()) { O << 
RISCVInstPrinter::getSymbolPrefix() << MO.getSymbolName() << RISCV::X0; } else if (MO.isExpr()) { O << MO.getExpr(); } else if (MO.isGlobal()) { O << MO.getGlobal(); } else if (MO.isMBB()) { O << *MO.getMBB(); } else { llvm_unreachable(""Unknown operand type""); } if (ExtraCode && ExtraCode[0] != '\0') O << ExtraCode; }" LLVM,RISCV,126,"function emitBasicBlockEnd(BB) { if (target == RISCV) { emit ""RISCV""; emit ""RISCV""; emit ""RISCV""; emit ""RISCV""; emit ""RISCV""; } else { return ""Target not supported.""; } }" LLVM,RISCV,127,"void RISCVTargetMachine::emitBasicBlockStart() { std::cout << ""RISCV, RISCV, RISCV, RISCV, RISCV"" << std::endl; }" LLVM,RISCV,128, LLVM,RISCV,129, LLVM,RISCV,130,"void PrintAsmOperand(Value *Operand, AsmPrinter &AP) { RISCVCastInst *O = dyn_cast(Operand); if (O && O->isReg()) { int RegNum = O->getRegNum(); AP.OutStreamer->AddComment(""%"" + Twine(RegNum)); AP.printRegName(RegNum); } else { AP.printOperand(Operand, false); } }" LLVM,RISCV,131,"void lowerOperand(MCInst &Inst, const MachineOperand &MO, const RISCVSubtarget &STI) { MCContext &Context = MF->getContext(); MCInstLowering.lowerOperand(Inst, MO, STI); if (MO.isGlobal()) { const GlobalValue *GV = MO.getGlobal(); const MCExpr *expr = lowerConstantWithAlignment(GV, Context); if (expr) MO.setTargetFlags(expr); return; } switch(MO.getType()) { default: LLVM_DEBUG(dbgs() << ""Cannot lower operand: "" << MO << ""\n""); report_fatal_error(""unknown operand type""); return; case MachineOperand::MO_GlobalAddress: LLVM_DEBUG(dbgs() << ""Cannot lower global addresses for RISCV "" << MO << ""\n""); report_fatal_error(""unsupported operand type for RISCV""); return; case MachineOperand::MO_RegisterMask: return; } }" LLVM,RISCV,132, LLVM,RISCV,133, LLVM,RISCV,134, LLVM,RISCV,135,"void RISCVTargetMachine::emitStartOfAsmFile(raw_ostream &OS) { std::string TargetName = getName(); OS << ""\t.file\t\"":\"""" << TargetName << ""\""\n""; OS << ""\t.RISCV\t"" << TargetName << ""\n""; }" LLVM,RISCV,136,"std::string RISCVTargetMachine::getPassName() const { return std::string(""RISCV Assembly Printer""); }" LLVM,RISCV,137, LLVM,RISCV,138,"void PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &O, const char *Modifier = nullptr) { assert(MI->getOpcode() == TargetOpcode::INLINEASM && ""Not an inline asm instruction!""); const MCAsmInfo *MAI = MI->getMF()->getTarget().getMCAsmInfo(); assert(MI->getNumOperands() > OpNo && ""OpNo too large!""); unsigned CurOp = 0; unsigned BaseReg = 0; unsigned OffsetImm = 0; unsigned ScaleImm = 1; unsigned Global = 0; unsigned Symbol = 0; unsigned MicroMips = 0; unsigned Intel = 0; unsigned IsIndirect = 0; unsigned IsMem = 0; unsigned Implicit = 0; unsigned IsISD = 0; unsigned Size = 0; StringRef SymName; while (CurOp < OpNo) { assert(MI->getNumOperands() > CurOp && ""Invalid operand number!""); ++CurOp; } assert(MI->getOperand(CurOp).isGlobal() && ""Expected global operand!""); Global = CurOp++; assert(MI->InstrInfo->getGlobalSymbol(MI->getOperand(Global), SymName) && ""Symbol operand expected!""); if (MI->getOperand(CurOp).isReg()) { assert(MI->getOperand(CurOp).isReg() && ""Expected register operand!""); BaseReg = CurOp++; } else if (MI->getOperand(CurOp).isImm()) { assert(MI->getOperand(CurOp).isImm() && ""Expected immediate operand!""); OffsetImm = CurOp++; if (MI->getOperand(OffsetImm).isMachineConstantPoolIndex()) { Symbol = OffsetImm++; IsMem = 1; } if (MI->getOperand(OffsetImm).isCPI()) { Symbol = OffsetImm++; IsMem = 1; IsMemOp = 1; } if 
(ExtraCode != nullptr && !strcmp(ExtraCode, ""z"") &&MI->getOperand(OffsetImm).isImm()) { ScaleImm = OffsetImm++; } }" LLVM,RISCV,139," raw_ostream &OS) { const MachineOperand &MO = MI->getOperand(OpNo); switch (MO.getType()) { case MachineOperand::MO_Register: OS << RISCVInstPrinter::getRegisterName(MO.getReg()); break; case MachineOperand::MO_Immediate: OS << MO.getImm(); break; case MachineOperand::MO_GlobalAddress: OS << *getSymbol(MO.getGlobal()); break; default: llvm_unreachable(""Invalid operand type!""); } if (ExtraCode && ExtraCode[0]) OS << '@' << ExtraCode; if (OpNo + 1 < MI->getNumOperands()) OS << "", ""; }" LLVM,RISCV,140,"for (const Argument &AI : F.args()) { const auto &DL = F.getParent()->getDataLayout(); unsigned Reg = MF.addLiveIn(AI, &RISCV::GPRRegClass); MF.getRegInfo().addLiveRangeToEndOfBlock(Reg, MBB); MBB->addLiveIn(Reg); }" LLVM,RISCV,141,PredicateConstraint getConstraint(RISCV targetSpecificValue) { if (!isValidRISCV(targetSpecificValue)) { return null; } PredicateConstraint constraint = new PredicateConstraint(); RISCVCondition condition = targetSpecificValue.getCondition(); if (!isValidCondition(condition)) { return null; } constraint.setCondition(condition); return constraint; } LLVM,RISCV,142, LLVM,RISCV,143,"Function* getFormat(std::string target){ if(target == ""RISCV""){ return ""dwarf_riscv""; } else { return """"; } }" LLVM,RISCV,144,"const std::string getPassName() { return ""RISCV Branch Selector""; }" LLVM,RISCV,145,"void RISCVTargetLowering::lowerFormalArguments(MachineFunction &MF, const RISCVSubtarget &STI, LLVMContext &Ctx, const MachineRegisterInfo &MRI, SmallVectorImpl &InVals) const { MachineFrameInfo &MFI = MF.getFrameInfo(); uint32_t NumParams = MF.getFunction().getFunctionType()->getNumParams(); unsigned ArgOffset = STI.isRV64() ? 16 : 8; MachineBasicBlock &MBB = MF.front(); for (uint32_t i = 0; i < NumParams; ++i) { MVT VT = getRegisterType(MF.getDataLayout(), *MF.getFunction().getFunctionType()->param_begin(i)); const TargetRegisterClass *RC = getRegClassFor(VT); unsigned Reg = MF.getRegInfo().createVirtualRegister(RC); int FI = MFI.CreateFixedObject(VT.getSizeInBits() / 8, ArgOffset + i * VT.getSizeInBits() / 8, true); SDValue FIN = CurDAG->getTargetFrameIndex(FI, VT); SDValue Load = CurDAG->getLoad(VT, DL, MBB, FIN, MachinePointerInfo::getFixedStack(FI), false, false, false, 4); MBB.addLiveIn(Reg, RC); InVals.push_back(Load); } }" LLVM,RISCV,146,"define void @lowerReturn() { %1 = load i32, i32* @swifterrorval %pseudo_ret = RISCVISD::PseudoRET %1 ret i32 %pseudo_ret }" LLVM,RISCV,147,"void lowerCall(CallInst *CI, RISCVTargetMachine &TM, RISCVSubtarget &STI) { Function *F = CI->getCalledFunction(); CallingConv::ID CC = F->getCallingConv(); SmallVector Args; for (unsigned i = 0, e = CI->getNumOperands(); i != e; ++i) { if (i == 0) continue; Args.push_back(CI->getOperand(i)); } Intrinsic::ID ID; if (CC == CallingConv::C) ID = Intrinsic::riscv_call_c; else if (CC == CallingConv::Fast) ID = Intrinsic::riscv_call_fast; else llvm_unreachable(""Unknown calling convention""); Function *Callee = Intrinsic::getDeclaration(F->getParent(), ID); CallInst *NewCI = CallInst::Create(Callee, Args); NewCI->insertBefore(CI); CI->replaceAllUsesWith(NewCI); }" LLVM,RISCV,148, LLVM,RISCV,149, LLVM,RISCV,150,"static void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); } ``` This implementation assumes that the RISCV backend requires the `SomeAnalysis` and `SomeOtherAnalysis` passes. 
If more passes are required by the RISCV backend, they can be added using the `au.addRequired()` function call. Another possible implementation of this function could be: ``` static void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addPreserved(); AU.addPreserved(); }" LLVM,RISCV,151,"StringRef getPassName(StringRef RISCV, Target Specific) { std::string passName = ""RISCV - Optimizer Pass""; passName += "" for "" + Target Specific; return StringRef(passName); }" LLVM,RISCV,152, LLVM,RISCV,153,"string Create(string opcode, int operand){ RISCVCP::RISCVCPModifier modifier = RISCV; RISCV target = RISCV; string instruction = opcode + "" "" + to_string(operand) + "", RISCV, RISCV, RISCVCP:RISCVCPModifier, RISCV""; return instruction; }" LLVM,RISCV,154, LLVM,RISCV,155, AnalysisUsage &AU) const override { AU.addRequired<...>(); AU.addPreserved<...>(); } LLVM,RISCV,156,"std::string getPassName(const PassInfo *PI) { std::string Name = PI->getPassName(); if (PI->getTargetMachineName() == ""RISCV"") { Name += "" (RISCV)""; } return Name; }" LLVM,RISCV,157, LLVM,RISCV,158,"llvm::StringRef getInstruction(const llvm::MCInst &Inst, uint64_t Address, const llvm::MCSubtargetInfo &STI, llvm::MCContext *Context) { std::string DisasmStr; llvm::raw_string_ostream DisasmStream(DisasmStr); const llvm::MCDisassembler *Disassembler = STI.getInstrInfo()->getDisassembler(); Disassembler->getInstruction(Inst, Address, DisasmStream, Context); return DisasmStream.str(); }" LLVM,RISCV,159,"RISCV::Instruction getInstruction(unsigned int Opcode){ std::string Instruction = ""Trying RISCV32 table :\n""; uint64_t TableEntry = 0; if (Opcode == RISCV::RISCV){ TableEntry = RISCV; } else { TableEntry = 0; } if (TableEntry < 0x3){ Instruction += ""Trying RISCV32Only_16 table (16-bit Instruction):\n""; Instruction += std::to_string(target_specific_value); } else{ Instruction += ""Trying RISCV_C table (16-bit Instruction):\n""; Instruction += std::to_string(target_specific_value); } return RISCV::Instruction(Opcode, TableEntry, 0, Instruction, 4, 2, 0, support::endian, RISCV::Feature64Bit ); }" LLVM,RISCV,160,"std::string getInstruction(uint32_t instruction, RISCVTarget const &target, unsigned char dest, unsigned char src1, unsigned char src2, int offset, llvm::support::endian endian){ InstructionInfo info = target.decodeInstruction(instruction); std::string disassembledInstruction; if(endian == llvm::support::little){ disassembledInstruction = info.mnemonic + "" ""; } else { disassembledInstruction = info.mnemonic + "" ""; } if(info.dest){ disassembledInstruction += ""x"" + std::to_string(dest) + "", ""; } disassembledInstruction += ""x"" + std::to_string(src1) + "", ""; if(info.src2){ disassembledInstruction += ""x"" + std::to_string(src2) + "", ""; } if(info.offset){ disassembledInstruction += std::to_string(offset); } return disassembledInstruction; }" LLVM,RISCV,161,"std::string getInstruction(RISCVTargetMachine &TM, const MCInst &MI, uint64_t Address, raw_ostream &OS, bool isBranch, bool isSymb){ RISCVInstPrinter &IP = *static_cast(TM.getSubtargetImpl()->getMCInstPrinter()); MCDisassembler *DisAsm = TM.getTarget().createMCDisassembler(*TM.getSubtargetImpl()->getMCInstrInfo(), *TM.getSubtargetImpl()->getMCRegInfo(), *TM.getSubtargetImpl()->getMCSubtargetInfo(), nullptr); MCInstPrinterOptions Opts; Opts.MI = &MI; Opts.FP = &IP; IP.printInstruction(MI, Address, Opts, OS); if (isBranch) OS << ""\nBranch Instruction""; if (isSymb) OS << ""\nSymbol: ??""; return OS.str(); }" LLVM,RISCV,162, LLVM,RISCV,163, 
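NOTE: the getInstruction records around here (158-165) attempt RISCVDisassembler::getInstruction. A simplified sketch under the standard MCDisassembler interface; the decoder-table names below are the TableGen-generated ones and the in-tree version also dispatches on FeatureStdExtC and extra tables, so treat this as an illustration only. The relevant ISA fact: an encoding whose two low bits are 0b11 is a 32-bit instruction, anything else is a 16-bit compressed (RVC) one.

#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/Support/Endian.h"
using namespace llvm;

DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                               ArrayRef<uint8_t> Bytes,
                                               uint64_t Address,
                                               raw_ostream &CS) const {
  if (Bytes.size() < 2)
    return MCDisassembler::Fail;
  if ((Bytes[0] & 0x3) != 0x3) {
    // Compressed (RVC) encoding: 16 bits, little endian.
    Size = 2;
    uint32_t Insn = support::endian::read16le(Bytes.data());
    return decodeInstruction(DecoderTable16, MI, Insn, Address, this, STI);
  }
  if (Bytes.size() < 4)
    return MCDisassembler::Fail;
  // Standard 32-bit encoding.
  Size = 4;
  uint32_t Insn = support::endian::read32le(Bytes.data());
  return decodeInstruction(DecoderTable32, MI, Insn, Address, this, STI);
}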
LLVM,RISCV,164,"std::string getInstruction(const MCInst &Inst, uint64_t Address, const MCSubtargetInfo &STI, raw_ostream &OS, bool Verbose) { SmallString<64> InstructionStr; MCContext &Ctx = STI.getContext(); const MCRegisterInfo *RegInfo = STI.getRegisterInfo(); unsigned Opcode = Inst.getOpcode(); unsigned Mode = STI.getFeatureBits(); const MCInstrInfo *MII = STI.getInstrInfo(); const MCInstrDesc &Desc = MII->get(Opcode); const MCInstrDesc *Table = &MII->get(Opcode, Mode, Desc.isPseudo()); MCDisassembler *Dis = STI.getDisassembler(); if (Dis->tryToDisassembleInst(InstructionStr, Opcode, Address, Inst, 0, *Table)) { return InstructionStr.str(); } else if (Verbose) { errs() << ""Unable to disassemble instruction at 0x"" << Twine::utohexstr(Address) << "" (opcode = 0x"" << Twine::utohexstr(Opcode) << "")\n""; } return """"; }" LLVM,RISCV,165,"StringRef getInstruction(uint32_t Opcode) { switch (Opcode) { case RISCV::ADD: return ""ADD x0, x0, x0""; case RISCV::ADDI: return ""ADDI x0, x0, 0""; case RISCV::SUB: return ""SUB x0, x0, x0""; case RISCV::AND: return ""AND x0, x0, x0""; case RISCV::SLL: return ""SLL x0, x0, x0""; case RISCV::SLT: return ""SLT x0, x0, x0""; case RISCV::SLTI: return ""SLTI x0, x0, 0""; case RISCV::LOAD: return ""LOAD x0, 0(x0)""; case RISCV::STORE: return ""STORE x0, 0(x0)""; case RISCV::ADDI4SPN: return ""ADDI4SPN x0, 0""; case RISCV::MVI: return ""MVI x0, 0""; case RISCV::AUIPC: return ""AUIPC x0, 0""; case RISCV::SB: return ""SB x0, 0(x0)""; case RISCV::SH: return ""SH x0, 0(x0)""; case RISCV::BEQ: return ""BEQ x0, x0, 0""; case RISCV::BNE: return ""BNE x0, x0, 0""; default: return ""Unknown instruction""; } }" LLVM,RISCV,166,"void emitValueImpl(unsigned Value, unsigned Size) { if (Value == 0) { if (RISCV == 0) { RISCV = Value; } } }" LLVM,RISCV,167,"void finish() { LiveIntervals *lis = MF->getAnalysis(); unsigned VirtualReg = 0; for (MachineRegisterInfo::virtreg_iterator I = MRI->virtreg_begin(), E = MRI->virtreg_end(); I != E; ++I) { unsigned Reg = *I; VirtualReg = std::max(VirtualReg, Reg); } for (LiveIntervals::iterator I = lis->begin(), E = lis->end(); I != E; ++I) { VNInfo *Reg = I->valno; if (!Reg->isAssignedReg()) { unsigned PhysReg = MRI->createVirtualRegister(&RISCV::GPRRegClass); Reg->setHasReg(PhysReg); for (LiveInterval::iterator I = Reg->getLiveRange().begin(), E = Reg->getLiveRange().end(); I != E; ++I) { MachineInstr *MI = I->valno->def.getInst(); if (MI->getOpcode() == RISCV::ST) { MI->getOperand(I->valno->def.getOperand())->setReg(PhysReg); MI->getOperand(I->valno->def.getOperand() + 1)->setReg(PhysReg); } else if (MI->getOpcode() == RISCV::LD) { MI->getOperand(I->valno->def.getOperand())->setReg(PhysReg); MI->getOperand(I->valno->def.getOperand() + 1)->setReg(PhysReg); } else if (MI->isRegReg()) { MI->getOperand(I->valno->def.getOperand())->setReg(PhysReg); }" LLVM,RISCV,168,"StringRef getStreamer(StringRef Target) { if (Target == ""RISCV"") { return ""RISCV""; } else { return """"; } }" LLVM,RISCV,169,"void reset(Value *Def, Instance *Instance) { RISCVMachineFunctionInfo *MFI = MF->getInfo(); MFI->setReset(Def, Instance); }" LLVM,RISCV,170,"std::string getPassName(const llvm::PassInfo& PI) { std::string Target = ""RISCV""; if (PI.getTargetMachine() != nullptr && PI.getTargetMachine()->getTargetTriple().getArch() == llvm::Triple::riscv) { Target = ""RISCV""; } return Target; }" LLVM,RISCV,171,"void runOnMachineFunction(Module &M, TargetMachine &TM) { const TargetOptions &Options = TM.getOptions(); const RISCVTargetMachine &RISCVTM = 
static_cast(TM); const RISCVSubtarget &Subtarget = RISCVTM.getSubtarget(); Function *F = M.getFunction(""main""); std::unique_ptr Emitter(RISCVTM.createMCCodeEmitter(*F)); MachineFunctionInfo *MFI = new MachineFunctionInfo(RISCVTM); MachineFunctionPassManager MFPM(TM); MFPM.addPass(RISCVTM.createPass()); MFPM.addPass(createRISCVISelDag(RISCVTM)); FunctionLoweringInfo *FLI = new FunctionLoweringInfo(M, Subtarget); F->setFunctionLoweringInfo(FLI); MFPM.addPass(createRISCVISelDag(RISCVTM)); MFPM.run(*F, M); Emitter->finish(); MFI->dump(); }" LLVM,RISCV,172,"StringRef getPassName() { return ""RISCV Target Pass""; }" LLVM,RISCV,173, LLVM,RISCV,174,"std::string getPassName(const RISCVTargetMachine *TM) { if (TM->getTargetTriple().getArch() == llvm::Triple::riscv32) { return ""RISCV 32 bit Pass""; } else if (TM->getTargetTriple().getArch() == llvm::Triple::riscv64) { return ""RISCV 64 bit Pass""; } else { return ""RISCV Pass""; } }" LLVM,RISCV,175,"void runOnMachineFunction(RISCV riscv, RISCV riscv_opt, RISCV riscv_attrs) { for (BasicBlock &BB : MF) { for (Instruction &I : BB) { generateMachineCode(I, riscv, riscv_opt, riscv_attrs); } } }" LLVM,RISCV,176," RegScavenger *RS) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const uint32_t *CSRegs = TRI->getCalleeSavedRegs(&MF); for (unsigned i = 0; CSRegs[i] != 0; ++i) { const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(CSRegs[i]); if (RC == &RISCV::GR32BitRegClass) { SavedRegs.set(CSRegs[i]); unsigned SuperReg = TRI->getMatchingSuperReg(CSRegs[i], RISCV::sub_64, &RISCV::GR64BitRegClass); SavedRegs.set(SuperReg); } else { SavedRegs.set(CSRegs[i]); } } SavedRegs.set(RISCV::fp_64); SavedRegs.set(RISCV::fp); }" LLVM,RISCV,177," MachineBasicBlock::iterator MI) { if (!MF.getTarget().usesCallFrameSetupDestroy()) return; const RISCVInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); unsigned CallFrameSetupOpcode = 0; unsigned CallFrameDestroyOpcode = 0; for (auto &NextMI : make_range(std::next(MI), MBB.end())) { CallFrameSetupOpcode = NextMI.getOpcode(); CallFrameDestroyOpcode = NextMI.getOpcode(); if (CallFrameSetupOpcode == RISCV::ADJCALLSTACKDOWN) { NextMI.getOperand(0).setImm(0); NextMI.setDesc(TII.get(RISCV::ADDI)); } if (CallFrameDestroyOpcode == RISCV::ADJCALLSTACKUP) { NextMI.getOperand(0).setImm(0); NextMI.setDesc(TII.get(RISCV::ADDI)); } if (NextMI.getOpcode() == RISCV::sp_64) { NextMI.getOperand(0).setImm(0); NextMI.setDesc(TII.get(RISCV::ADDI)); } } }" LLVM,RISCV,178, LLVM,RISCV,179,"bool hasFP(const Function &F) { if (F.getParent()->getTargetTriple() == ""riscv64"" || F.getTarget().getName() == ""riscv"") { return true; } return false; }" LLVM,RISCV,180,bool hasReservedCallFrame(const MachineFunction &MF) { const MachineFrameInfo &MFI = MF.getFrameInfo(); int numArgs = MF.getNumArgs(); bool argsOnStack = MFI.getStackSize() != 0; if (numArgs <= RISCV::X8 && argsOnStack) { return true; } return false; } LLVM,RISCV,181,"bool spillCalleeSavedRegisters() { if (RISCV->getTargetTriple().isRISCV()) { unsigned int TargetSP = RISCV::sp; unsigned int TargetFP = RISCV::s0; unsigned int RetAddr = RISCV::ra; unsigned int RetAddr_64 = RISCV::ra_64; if (!MF->getSubtarget().getFrameLowering()->hasReservedCallFrame(MF)) { return false; } MachineFunction::StubListType Stubs = MF->getStubs(); unsigned int StubSize = Stubs.size(); bool IsLegal = MF->getSubtarget().isTargetDarwin(); for (int RegNum = 0; RegNum < TargetSymRegs.size(); ++RegNum) { unsigned int StackAddr = RISCV::sp; const TargetRegisterInfo *TRI = 
MF->getSubtarget().getRegisterInfo(); unsigned int Reg = TargetSymRegs[RegNum]; if (!TargetReservedRegs_64(Reg, false)) continue; if (Reg == RetAddr) { StackAddr = RetAddr_64; } if (storeRegToStackSlot(*MF, *MBB, MI, StackAddr, Reg, *TRI)) { if (IsLegal) { BuildMI(*MBB, MI, MI->getDebugLoc(), MCRI->getCall(*MBB, MI->getDebugLoc()), StackAddr).addReg(RISCV::calleesaved_copy); } } else { ++NumStores; } } return true; } return false; }" LLVM,RISCV,182,bool canUseAsEpilogue(const MachineBasicBlock& MBB) { if (MBB.pred_size() != 1) { return false; } if (MBB.succ_size() != 1) { return false; } for (const MachineInstr& MI : MBB) { if (MI.isReturn()) { return true; } } for (const MachineInstr& MI : MBB) { if (MI.getDesc().TSFlags & RISCVII::ModifySP) { return false; } } for (const MachineInstr& MI : MBB) { if (MI.getDesc().TSFlags & RISCVII::ModifyRA) { return false; } } return false; } LLVM,RISCV,183,"bool canUseAsPrologue(MachineBasicBlock &MBB) { for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) { if (MI.getOpcode() == RISCV::X5) return true; } return false; }" LLVM,RISCV,184, RegScavenger *RS) const { TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF); for (MCPhysReg CSReg : CSRegs) { SavedRegs.set(CSReg); } LLVM,RISCV,185, LLVM,RISCV,186," MachineBasicBlock::iterator I) const { MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); if (!TII.usesCallFrameSetupDestroy(MF)) return; DebugLoc DL = I->getDebugLoc(); const RISCVInstrInfo *RISCVII = static_cast(MF.getSubtarget().getInstrInfo()); unsigned FrameReg = RISCVII->getFrameRegister(MF); if (I->getOpcode() == RISCV::ADJCALLSTACKDOWN) { int NumBytes = I->getOperand(0).getImm(); if (NumBytes) { int FrameIdx = MFI.CreateStackObject(NumBytes, 16, false); BuildMI(MBB, I, DL, TII.get(RISCV::ADDI), FrameReg).addFrameIndex(FrameIdx). 
addImm(-NumBytes); MBB.erase(I); } }" LLVM,RISCV,187,"void emitEpilogue() { store ""RISCV"" into the target specific value store ""RISCV"" into the target specific value store ""RISCV"" into the target specific value store ""RISCV"" into the target specific value store ""frame pointer should not have been eliminated"" into the target specific value store 0 into the target specific value store ""SecondSPAdjustAmount should be greater than zero"" into the target specific value }" LLVM,RISCV,188,int getStackIDForScalableVectors(TargetMachine &TM) { Triple TargetTriple(TM.getTargetTriple()); if(TargetTriple.getArch() == Triple::riscv) { return (1 << 7) + (1 << 2); } else { return 0; } } LLVM,RISCV,189,"bool hasFP(const Function &F) { if (F.getParent()->getTargetTriple().find(""riscv"") != std::string::npos) { if (F.hasFnAttribute(""noredzone"")) { return false; } else { return true; } } else { return false; } }" LLVM,RISCV,190, LLVM,RISCV,191,"void RISCVTargetLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) const { const RISCVSubtarget &Subtarget = MF.getSubtarget(); const RISCVFrameLowering *TFI = getFrameLowering(MF); MachineFrameInfo &MFI = MF.getFrameInfo(); int64_t StackSize = TFI->estimateStackSize(MF); MFI.setStackSize(StackSize); MFI.setFrameSize(StackSize); const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); RISCVFunctionInfo *RISCVFI = MF.getInfo(); RISCVFI->setRAIndex(MFI.CreateStackObject(TRI->getSlotSize(), TRI->getSlotAlignment(), false)); RISCVFI->setFP(MFI.CreateStackObject(TRI->getSlotSize(), TRI->getSlotAlignment(), false)); RISCVFI->setBasePointerOffset(MFI.CreateStackObject(TRI->getSlotSize() * 11, TRI->getSlotAlignment(), false)); MFI.addFrameOffset(RISCV::X8, RISCVFI->getFP()); int NumRegs = RISCV::GPRRegClass.getNumRegs(); for (int i = 0; i < NumRegs; ++i) { if (i == RISCV::X2 || i == RISCV::X8) continue; MFI.CreateStackObject(TRI->getSlotSize(), TRI->getSlotAlignment(), false); }" LLVM,RISCV,192, LLVM,RISCV,193,"bool spillCalleeSavedRegisters() { if (!isProfitableToSpillCalleeSavedRegisters()) { return false; } for (unsigned int I = 0; I < RISCV::NumTargetCallArgRegs; ++I) { unsigned int Reg = RISCV::TargetCallArgRegs[I]; unsigned int Slot = getStackSlotForReg(Reg, MFI); BuildMI(*MBB, MI, DL, TII->get(RISCV::PseudoCALLReg), Reg) .addReg(Reg) .addImm(RISCVII::MO_CALL) .addFrameIndex(Slot); } return true; }" LLVM,RISCV,194," BitVector &CalleeSavedRegsDefs) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const MCPhysReg *SavedRegs = TRI->getCalleeSavedRegs(MF); std::vector SavedRegsList; for (unsigned i = 0; SavedRegs[i] != 0; ++i) { MCPhysReg Reg = SavedRegs[i]; if (RISCV::FPR32RegClass.contains(Reg) || RISCV::FPR64RegClass.contains(Reg)) continue; if (CalleeSavedRegsUses.test(Reg) || CalleeSavedRegsDefs.test(Reg)) { if (std::find(SavedRegsList.begin(), SavedRegsList.end(), Reg) == SavedRegsList.end()) { SavedRegsList.push_back(Reg); } } } return SavedRegsList; }" LLVM,RISCV,195, LLVM,RISCV,196,"void RISCVTargetLowering::eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { if (Subtarget->hasFrame()) { MachineInstr &MI = *MBBI; if (MI.getOpcode() == RISCV::ADJCALLSTACKDOWN) { Register FrameReg = MI.getOperand(1).getReg(); int FrameSize = MI.getOperand(2).getImm(); int FrameAlign = MI.getOperand(3).getImm(); adjustStack(MBB, MBBI, FrameReg, FrameSize, FrameAlign, false); } MBBI++; if (MBBI->getOpcode() == RISCV::ADJCALLSTACKUP) { Register FrameReg = 
MBBI->getOperand(1).getReg(); int FrameSize = MBBI->getOperand(2).getImm(); int FrameAlign = MBBI->getOperand(3).getImm(); adjustStack(MBB, MBBI, FrameReg, FrameSize, FrameAlign, true); } MBB.erase(MBBI); } }" LLVM,RISCV,197,"void emitEpilogue(Function *F, MachineFunction *MF) { MachineBasicBlock &MBB = MF->front(); MachineFrameInfo &MFI = MF->getFrameInfo(); const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo(); int64_t StackSize = MFI.getStackSize(); MachineFunction::iterator EIt = MF->end(); EIt--; MachineBasicBlock &FuncEnd = *EIt; MachineBasicBlock* MBB_End = MF->CreateMachineBasicBlock(); MF->insert(EIt, MBB_End); MBB_End->setHasAddressTaken(); MF->push_back(MBB_End); DebugLoc DL; MBB_End->addLiveIn(RISCV::X8); BuildMI(*MBB_End, MBB_End->end(), DL, TII->get(RISCV::ADDI)) .addReg(RISCV::SP) .addReg(RISCV::SP).addImm(StackSize).addReg(0) .setMIFlag(MachineInstr::FrameSetup); MBB_End->addLiveIn(RISCV::X8); DebugLoc DL_func; MCSymbol *Note = MF->getContext().createTempSymbol(); MBB_End->addLabels(Note); BuildMI(*MBB_End, MBB_End->end(), DL_func, TII->get(RISCV::EH_LABEL)) .addSym(Note); if (MFI.hasCalls() && allehframepseudos(*F)) { report_fatal_error(""frame pointer should not have been eliminated""); } }" LLVM,RISCV,198,"void emitPrologue(Function *F) { BasicBlock *BB = &(F->front()); Instruction *inst = &(BB->front()); BasicBlock *prologueBB = BasicBlock::Create(F->getContext(), ""prologue"", F); BB->getParent()->getBasicBlockList().insert(BB, prologueBB); while (inst != nullptr) { Instruction *nextInst = inst->getNextNode(); inst->removeFromParent(); inst->insertBefore(prologueBB->getTerminator()); inst = nextInst; } IRBuilder<> Builder(prologueBB); Builder.CreateCall(llvm::Intrinsic::getDeclaration(F->getParent(), llvm::Intrinsic::riscv_prologue)); } void emitEpilogue(Function *F) { BasicBlock *BB = &(F->back()); Instruction *inst = &(BB->back()); BasicBlock *epilogueBB = BasicBlock::Create(F->getContext(), ""epilogue"", F); BB->getParent()->getBasicBlockList().insertAfter(BB, epilogueBB); while (inst->getPrevNode() != nullptr) { Instruction *prevInst = inst->getPrevNode(); inst->removeFromParent(); inst->insertBefore(epilogueBB->getTerminator()); inst = prevInst; } IRBuilder<> Builder(epilogueBB); Builder.CreateCall(llvm::Intrinsic::getDeclaration(F->getParent(), llvm::Intrinsic::riscv_epilogue)); }" LLVM,RISCV,199,"MachineOperand getFrameIndexReference(int frameIndex) { unsigned BaseReg = RISCV::X2; int Offset = frameIndex * 8; unsigned OffsetLo = RISCV::RISCV; unsigned OffsetHi = RISCV::RISCV; MachineOperand MO = MachineOperand::CreateRefFrameIndex(frameIndex, Offset, BaseReg, OffsetLo, OffsetHi); return MO; }" LLVM,RISCV,200, LLVM,RISCV,201," MachineFunction &MF, AFI { const TargetSubtargetInfo &STI = MF.getSubtarget(); const RISCVSubtarget *Subtarget = &STI.getRISCV(); const TargetRegisterInfo *RegInfo = Subtarget->getRegisterInfo(); unsigned GPRReg = RegInfo->getEncodingValue(RISCV::GPRRegClass); RISCV11PhysReg = RegInfo->getDwarfRegNum(RISCV::X11, false); AFI->setShouldUseGPRClass(RISCV11PhysReg); AFI->setTargetSpecificValue(11); MF.getFrameInfo().finalizeFrameInfo(); }" LLVM,RISCV,202,"for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++ i) { int fixedOffset = MFI.getObjectOffset(i); int frameOffset = fixedOffset - FrameSize; MFI.setObjectOffset(i, frameOffset); }" LLVM,RISCV,203, LLVM,RISCV,204, LLVM,RISCV,205,"bool enableShrinkWrapping(const TargetMachine &TM) { const Triple &TT = 
TM.getTargetTriple(); return TT.getArch() == Triple::RISCV; } This function takes in a reference to the TargetMachine object and checks if the target's architecture is RISCV or not. If it is RISCV, then the function returns true, indicating that the target can correctly handle shrink wrapping. Otherwise, it returns false. You can also modify this function to check for other target specific values in addition to RISCV, if needed. For example, you can add a check for the target's OS and ABI to further refine the conditions under which shrink wrapping is enabled. The modified code may look like this: bool enableShrinkWrapping(const TargetMachine &TM) { const Triple &TT = TM.getTargetTriple(); return (TT.getArch() == Triple::RISCV) && (TT.getOS() == Triple::RISCV) && (TT.getABI() == Triple::RISCV); }" LLVM,RISCV,206,"Function* restoreCalleeSavedRegisters(LLVMContext &context, MachineFunction &MF) { const MCPhysReg *CalleeSavedRegs = MF.getRegInfo().getCalleeSavedRegs(); unsigned int numCalleeSavedRegs = MF.getRegInfo().getNumCalleeSavedRegs(); BasicBlock *BB = BasicBlock::Create(context, ""restore_registers"", MF); MachineInstr *FirstInst = &MF.front().front(); MachineIRBuilder MIB(FirstInst); for (unsigned int i = 0; i < numCalleeSavedRegs; ++i) { MCPhysReg Reg = CalleeSavedRegs[i]; int slot = MF.getRegInfo().getStackSlot(Reg); MIB.buildInstr(RISCV::LW) .addDef(Reg) .addFrameIndex(slot) .addImm(0); if (!MIB.getMI()) { report_fatal_error(""loadRegFromStackSlot didn't insert any code!""); return nullptr; } } MIB.buildInstr( RISCV::PseudoTAIL); MIB.buildInstr(RISCVII::MO_CALL); MIB.buildInstr(RISCV::PseudoRET); return MF.getFunction(); }" LLVM,RISCV,207,"bool spillCalleeSavedRegisters() { MachineModuleInfo &MFI = MF->getMMI(); for (unsigned i = 0; i < RISCV::NUM_TARGET_CALLEE_SAVED_REGS; ++i) { RISCV::Register Reg = RISCV::CALLEE_SAVED_REGS[i]; if (!MFI.regLiveOut(Reg)) { int FI = MFI.CreateFixedObject(RISCV::RISCV_TI*sizeof(int32_t), i*RISCV::RISCV_TI + RISCV::CALLEE_SAVED_REGS_COUNT); BuildMI(*MF, I, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5) .addReg(Reg, RegState::Kill).addFrameIndex(FI); } else { return false; } } return true; }" LLVM,RISCV,208, LLVM,RISCV,209,"void emitEpilogue() { StringRef epilog1 = ""RISCV""; StringRef epilog2 = ""RISCV""; StringRef epilog3 = ""RISCV""; StringRef epilog4 = ""RISCV""; StringRef epilog5 = ""frame pointer should not have been eliminated""; int epilog6 = 0; StringRef epilog7 = ""SecondSPAdjustAmount should be greater than zero""; SmallVector epilogValues = {epilog1, epilog2, epilog3, epilog4, epilog5, Twine(epilog6).str(), epilog7}; BasicBlock *epilogBlock = BasicBlock::Create(Context, ""epilog"", &F); IRBuilder<> epilogBuilder(epilogBlock); Type *returnType = F.getReturnType(); if (returnType->isVoidTy()) { epilogBuilder.CreateRetVoid(); } else { Value *returnValue = epilogBuilder.CreateLoad(FirstReturnReg); epilogBuilder.CreateStore(returnValue, StoreAddr); epilogBuilder.CreateRet(returnValue); }" LLVM,RISCV,210,"void determineFrameLayout(MachineFunction &MF) { const RISCVInstrInfo *TII = MF.getSubtarget().getInstrInfo(); const RISCVRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); MachineModuleInfo &MMI = MF.getMMI(); MFI.setObjectAlignment(Align(16)); MFI.setStackAlignment(Align(16)); unsigned MinCallFrameSize = alignTo(TFI->getOffsetOfLocalArea()+TFI->getStackAlignment(), 16); unsigned FrameSize = MFI.getStackSize() + 
MinCallFrameSize; for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { MachineObject *MO = MFI.getObjectPtr(i); if (!MO->isVariableLength() && MO->getSSPFlags() == TargetStackID::SSP::LiveIn) { MFI.setObjectOffset(MO, -FrameSize + MO->getOffset()); } }" LLVM,RISCV,211, LLVM,RISCV,212,"std::pair RISCVTargetLowering::getFrameIndexReference(const MachineFunction &MF, int FI) const { const Register BaseReg = RISCV::X2; int64_t Offset = MF.getFrameInfo().getObjectOffset(FI); return std::make_pair(BaseReg, Offset); } RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI) const { return std::make_pair(0, 1, 0, 1, RISCV::X2, RISCVABI::getBPReg(), RISCV::X2); } Register RISCVTargetMachine::getFrameIndexReference(const GlobalValue VM, const MachineFrameInfo MF) const { return RISCVFrameLowering::getFrameIndexReference(VM, MF); }" LLVM,RISCV,213,"bool canUseAsEpilogue(MachineBasicBlock &MBB) { if (MBB.getMBBFlag(RISCV::RISCV)) { for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) { if (I->isReturn()) { return true; } } return false; } return false; }" LLVM,RISCV,214,bool canUseAsPrologue(MachineBasicBlock &MBB) { bool hasX5Register = false; for (MachineInstr &MI : MBB) { for (MachineOperand &MO : MI.operands()) { if (MO.isReg() && MO.getReg() == RISCV::X5) { hasX5Register = true; break; } } if (hasX5Register) break; } MachineInstr *LastInst = MBB.getLastNonDebugInstr(); if (!LastInst || !LastInst->isReturn()) return false; if (MBB.succ_size() != 1) return false; MachineBasicBlock *SuccMBB = *MBB.succ_begin(); bool isSuccPrologue = canUseAsPrologue(*SuccMBB); return hasX5Register || isSuccPrologue; } LLVM,RISCV,215,"emitEpilogue(functionName, framePointer, SecondSPAdjustAmount) { RISCV.emitComment(""Inserting epilogue code for "" + functionName); RISCV.emitMoveInstruction(framePointer, ""sp""); RISCV.emitAddInstruction(""sp"", ""sp"", SecondSPAdjustAmount); RISCV.emitRestoreInstruction(""sp"", framePointer); RISCV.emitReturnInstruction(); RISCV.emitCheckFramePointerElimination(); RISCV.emitCheckSecondSPAdjustAmount(SecondSPAdjustAmount); RISCV.emitComment(""End of epilogue code for "" + functionName); }" LLVM,RISCV,216, LLVM,RISCV,217,"std::vector determineCalleeSaves(const MachineBasicBlock &MBB, const MachineRegisterInfo &MRI, const std::vector &CalleeSavedRegs, const std::vector &BonusCalleeSavedRegs) { std::vector ActualCalleeSavedRegs; for (auto Reg : CalleeSavedRegs) { if (MRI.isReserved(Reg.getReg())) { ActualCalleeSavedRegs.push_back(Reg.getReg()); } else if (MRI.getTargetRegisterInfo()->isTypeLegal(Reg.getReg(), RISCV::FPR32RegClass)) { ActualCalleeSavedRegs.push_back(Reg.getReg()); } else if (MRI.getTargetRegisterInfo()->isTypeLegal(Reg.getReg(), RISCV::FPR64RegClass)) { ActualCalleeSavedRegs.push_back(Reg.getReg()); } else if (MRI.getTargetRegisterInfo()->isTypeLegal(Reg.getReg(), RISCV::CPURegClass)) { ActualCalleeSavedRegs.push_back(Reg.getReg()); } } for (auto Reg : BonusCalleeSavedRegs) { if (MRI.isReserved(Reg)) { ActualCalleeSavedRegs.push_back(Reg); } } " LLVM,RISCV,218, LLVM,RISCV,219,"void emitEpilogue(Function *F, MachineFunction &MF) { MachineBasicBlock &MBB = MF.front(); MachineFrameInfo &MFI = MF.getFrameInfo(); const RISCVInstrInfo *TII = MF.getSubtarget().getInstrInfo(); DebugLoc DL = MBB.findDebugLoc(MBB.getLastNonDebugInstr()); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); MachineBasicBlock *epilogueBB = MF.CreateMachineBasicBlock(); MF.push_back(epilogueBB); unsigned FramePtr = 
TRI->getFrameRegister(MF); unsigned StackSize = MFI.getStackSize(); MachineInstrBuilder AdjustStack = BuildMI(epilogueBB, DL, TII->get(RISCV::ADDI), RISCV::SP) .addReg(RISCV::SP).addImm(StackSize); MachineInstrBuilder AdjustFramePtr = BuildMI(epilogueBB, DL, TII->get(RISCV::ADDI), FramePtr) .addReg(FramePtr).addImm(StackSize); if (!MFI.isFrameAddressTaken() && MFI.getStackSize() == 0) { emitWarning(""frame pointer should not have been eliminated""); } const TargetOptions &Options = MF.getTarget().Options; int SecondSPAdjustAmount = Options.SecondSPAdjustAmount; if (SecondSPAdjustAmount > 0) { emitWarning(""SecondSPAdjustAmount should be greater than zero""); }" LLVM,RISCV,220,"RISCV::RegPair getFrameIndexReference(const RISCVInstrInfo& TII, int SPAdj, int64_t FrameIndex) { RISCV::RegPair RegPair; RegPair.Reg0 = RISCV::X2; RegPair.Reg1 = 0; const MachineRegisterInfo &MRI = TII.getMachineFunction().getRegInfo(); const TargetRegisterClass *RC = MRI.getRegClass(RISCV::GPRRegClassID); MachineInstrBuilder MIB = BuildMI(TII.getMachineFunction(), DebugLoc(), TII.get(RISCV::ADDI), RC->getRegister(0)); TII.getAddiInstr(MIB, RISCV::RISCV, FrameIndex); RegPair.Reg0 = MIB.getOperand(0).getReg(); RegPair.Reg1 = MIB.getOperand(1).getReg(); return RegPair; }" LLVM,RISCV,221,"bool RISCVFrameLowering::hasFP(const MachineFunction &MF) const { const Function &F = MF.getFunction(); const MachineFrameInfo &MFI = MF.getFrameInfo(); if (F.hasFnAttribute(""no-frame-pointer-elim"") || F.hasFnAttribute(""disable-tail-calls"")) return true; return MFI.getStackSize() > 0 || MF.getInfo().hasFP(); } Finally, in the `RISCVISelLowering.cpp` file, add the following code to set the FP register in the prologue and epilogue: void RISCVTargetLowering::eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { MachineInstr &MI = *I; DebugLoc DL = MI.getDebugLoc(); unsigned SPReg = getStackPointerRegisterToSaveRestore(); BuildMI(MBB, I, DL, TII.get(RISCV::ADDI), X0) .addReg(SPReg).addImm(-4).setMIFlag(MachineInstr::FrameSetup); MI.setDesc(TII.get(RISCV::ADDI)); MI.getOperand(0).setReg(SPReg); MI.getOperand(1).setReg(SPReg); MI.getOperand(2).setImm(-4); BuildMI(MBB, std::next(I), DL, TII.get(RISCV::ADDI), SPReg) .addReg(X0).addImm(4).setMIFlag(MachineInstr::FrameDestroy); }" LLVM,RISCV,222,"void RISCVTargetLowering::processFunctionBeforeFrameFinalized(Function &F, const TargetMachine &TM) { MachineFunction &MF = F.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); const RISCVSubtarget &STI = MF.getSubtarget(); const RISCVTargetMachine &RISCVTM = static_cast(TM); const TargetRegisterInfo *TRI = RISCVTM.getRegisterInfo(); unsigned RISCVVal = RISCV::GPRRegClass.getId(); MFI.addTargetSpillSlot(MFI.CreateStackObject(TRI->getSpillSize(RISCVVal), TRI->getSpillAlignment(RISCVVal), false), TRI->getSpillAlignment(RISCVVal)); TargetLoweringBase::processFunctionBeforeFrameFinalized(F, TM); }" LLVM,RISCV,223, LLVM,RISCV,224, LLVM,RISCV,225, LLVM,RISCV,226,void RISCVTargetMachine::processFunctionBeforeFrameFinalized(Function &F) { MachineFunction &MF = MF.getOrCreateMachineFunction(F); if (!EnableCheriPureCapABI || !MF.getSubtarget().isCheriPureCapABI()) return; MF.addLiveIn(RISCV::GPCRRegClass); MF.addLiveIn(RISCV::GPRRegClass); const TargetLowering *TLI = MF.getSubtarget().getTargetLowering(); MachineFrameInfo &MFI = MF.getFrameInfo(); if (MFI.hasVarSizedObjects() || MFI.hasCalls()) MFI.setAdjustsStack(true); if (Function *Thunk = F.getOnlyCalledFunction()) if 
(Thunk->hasFnAttribute(Attribute::CHERIKernel)) MFI.setRequiresCHERIABI(true); } LLVM,RISCV,227,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { const RISCVSubtarget &STI = MF.getSubtarget(); const RISCVInstrInfo &TII = *STI.getInstrInfo(); const RISCVRegisterInfo &TRI = *STI.getRegisterInfo(); MachineBasicBlock &MBB = MF.front(); DebugLoc DL; const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs(); unsigned NumCSRegs = MF.getRegInfo().getNumCalleeSavedRegs(); if (!STI.isCheriPureCapABI()) { for (unsigned i = 0; i < NumCSRegs; ++i) { unsigned CSR = CSRegs[i]; if (CSR == RISCV::NoRegister) continue; if (!TRI.isPhysicalRegister(CSR)) continue; if (RISCV::GPR32RegClass.contains(CSR)) { BuildMI(MBB, MBB.getFirstNonPHI(), DL, TII.get(RISCV::LW)) .addReg(CSR) .addFrameIndex(CallerSavedStackSize + TRI.getReservedRegStackSpace()) .setMIFlag(MachineInstr::FrameSetup); } else { BuildMI(MBB, MBB.getFirstNonPHI(), DL, TII.get(RISCV::LD)) .addReg(CSR) .addFrameIndex(CallerSavedStackSize + TRI.getReservedRegStackSpace()) .setMIFlag(MachineInstr::FrameSetup); } }" LLVM,RISCV,228,"bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) { const TargetMachine &TM = MBB.getParent()->getTarget(); const RISCVSubtarget &Subtarget = TM.getSubtarget(); if (!Subtarget.isCheriPureCapABI()) { return false; } bool HasMadeChanges = false; for (const RISCVRegisterInfo::CheriPureCapCalleeSavedInfo &Info : RISCVRegisterInfo::getCheriPureCapCalleeSavedRegs()) { unsigned Reg = Info.RegNo; if (!TRI->isCalleeSavedPhysReg(Reg)) { continue; } int FrameIdx = MBB.getParent()->getFrameInfo().CreateSpillStackObject(Info.Size, Info.Alignment); TII.storeRegToStackSlot(MBB, MI, Reg, true, FrameIdx, Info.RC, TRI); HasMadeChanges = true; } return HasMadeChanges; }" LLVM,RISCV,229,"Register RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); const RISCVRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); return llvm::Register(getFrameRegister(MF), -MFI.getStackSize() + MFI.getObjectOffset(FI)); }" LLVM,RISCV,230, LLVM,RISCV,231,"for (unsigned int i = 0; i < NumSavedRegs; i++) { unsigned int Offset = -(i + 1) * 4; BuildMI(MBB, MBBI, DL, TII.get(RISCV::LW), RISCV::X10) .addReg(RISCV::SP) .addImm(Offset); BuildMI(MBB, MBBI, DL, TII.get(RISCV::ADDI), RISCV::X10) .addReg(RISCV::X10) .addImm(0); }" LLVM,RISCV,232,"void emitPrologue(Function *F, RISCVSpecificValues *RSV) { BasicBlock *prologueBB = BasicBlock::Create(F->getParent()->getContext(), ""prologue"", F); IRBuilder<> IRB(prologueBB); for (unsigned int i = 0; i < RSV->getCalleeSavedRegs().size(); i++) { LoadInst *loadInst = IRB.CreateLoad(RSV->getCalleeSavedRegs()[i]); IRB.CreateStore(loadInst, RSV->getStackPointer()->getOperand(0)); RSV->getStackPointer()->setOperand(0, IRB.CreateAdd(RSV->getStackPointer()->getOperand(0), ConstantInt::get(Type::getInt64Ty(F->getParent()->getContext()), 8))); }" LLVM,RISCV,233, LLVM,RISCV,234, LLVM,RISCV,235,"struct FrameIndexReference { unsigned int baseRegister; unsigned int offset; }; ``` Next, we need to define the function and its parameters. The getFrameIndexReference function should take in the MachineFunction and the MachineFrameInfo objects as parameters, as well as the stack object size. 
``` FrameIndexReference getFrameIndexReference(const MachineFunction &MF, const MachineFrameInfo &MFI, unsigned int stackObjectSize) { } ``` Inside the function, we will first check the stack realignment and var-sized objects to determine if the combination is valid. If not, we will return an error message and set the base register and offset to 0. ``` if (MFI.hasVarSizedObjects() && MFI.getStackID() == TargetStackID::Default) { errs() << ""Unexpected combination of stack realignment and var-sized objects""; return {0, 0}; } ``` Next, we will get the RISCV specific register for frame index reference by using the getCallFrameSetupOpcode function of RISCVFrameLowering class. This function will return the index of the RISCV register we will use as the base register. ``` unsigned int riscvRegister = RISCVFrameLowering::getCallFrameSetupOpcode(); ``` Then, we need to determine the size of the frame index. If the frame index size is 4, we will use the x-stored indexing mode. ``` if (MFI.getStackSize() == 4) { return {riscvRegister, stackObjectSize}; } ``` Otherwise, if the frame index size is 8, we will use the d-stored indexing mode. ``` else if (MFI.getStackSize() == 8) { return {riscvRegister, 2 * stackObjectSize}; }" LLVM,RISCV,236,void processFunctionBeforeFrameFinalized(const MachineFunction &MF) { const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { if (MI.isTargetSpecific()) { switch (MI.getOpcode()) { case RISCV::GPRRegClass: break; case RISCV::GPRRegClass: break; case RISCV::GPRRegClass: break; } } } } } LLVM,RISCV,237,"void emitEpilogue(int RISCV, int RISCV, int RISCV, int RISCV, int SecondSPAdjustAmount) { if (frame pointer is eliminated) { emit_warning(""frame pointer should not have been eliminated""); return; } if (SecondSPAdjustAmount > 0) { emit_RISCV(RISCV); emit_RISCV(RISCV); emit_RISCV(RISCV); emit_RISCV(RISCV); emit_set_value(SecondSPAdjustAmount); emit_RISCV(RISCV); } else { emit_warning(""SecondSPAdjustAmount should be greater than zero""); return; } }" LLVM,RISCV,238,"void processFunctionBeforeFrameFinalized(llvm::MachineFunction &MF) { const llvm::RISCVSubtarget &STI = MF.getSubtarget(); const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo(); llvm::MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetRegisterClass *CalleeSavedRC = TRI->getGPRRegClass(); const uint32_t *CalleeSavedRegs = TRI->getCalleeSavedRegs(&MF); for (unsigned i = 0; CalleeSavedRegs[i]; ++i) { MFI.setCalleeSavedInfo(CalleeSavedRegs[i], &CalleeSavedRC->getFrameIndexTargetRegClass()); } const int FrameSize = MFI.getStackSize(); const int FrameAlign = MFI.getMaxAlignment(); MF.setFrameInfo(llvm::TRI, FrameSize, FrameAlign); MF.setFunction(&RISCV::RISCV, RISCV::RISCV, RISCV::GPRRegClass, 11, 0, 0, 0, 0, 8, 0); }" LLVM,RISCV,239," BitVector &SavedRegs, RegScavenger *RS) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const MCPhysReg *RISCVSaveRegs = RISCV::CSR_SaveList; for (unsigned i = 0; RISCVSaveRegs[i] != 0; ++i) { unsigned Reg = RISCVSaveRegs[i]; if (MF.getRegInfo().isPhysRegUsed(Reg)) { if (RS && RS->isRegUsed(Reg)) continue; SavedRegs.set(Reg); const MCPhysReg *SubRegs = TRI->getSubRegisters(Reg); for (unsigned j = 1; SubRegs[j]; ++j) { unsigned SubReg = SubRegs[j]; SavedRegs.set(SubReg); } } } }" LLVM,RISCV,240,void emitEpilogue(Function *F) { if (F->getBasicBlockList().empty()) { return; } LLVM,RISCV,241,"bool restoreCalleeSavedRegisters(int& numInstructions, MachineFunction &MF, 
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL) { const Function &F = MF.getFunction(); const RISCVSubtarget &Subtarget = MF.getSubtarget(); const RISCVRegisterInfo *MRI = Subtarget.getRegisterInfo(); const RISCVFrameLowering *TFI = Subtarget.getFrameLowering(); if (TFI->hasReservedCallFrame(MF) || !F.getCallingConv() != CallingConv::C || F.getCallingConv() == CallingConv::PreserveAll || !Subtarget.is64Bit()) { return false; } MachineRegisterInfo &MRI = MF.getRegInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); const unsigned SlotSize = Subtarget.getFrameLowering()->getFramePointerSaveSize(); bool Res = true; for (const std::pair &CSI : MFI.getCalleeSavedInfo()) { unsigned Reg = CSI.first; unsigned RegSlot = CSI.second.getFrameIdx(); if (Reg != RISCV::X0 && MRI.isPhysRegModified(Reg) && !MRI.isReserved(Reg) && !MRI.isLiveIn(Reg)) { if (loadRegFromStackSlot(numInstructions, MF, DL, &MBB, I, Reg, RegSlot, Reg, TII)) { Res = false; continue; } } }" LLVM,RISCV,242, const TargetRegisterInfo *TRI) const { std::vector SavedRegs; const MCPhysReg* CalleeSavedRegs = TRI->getCalleeSavedRegs(&MF); unsigned Reg; while ((Reg = *CalleeSavedRegs++)) { if (Reg == RISCV::X1 || Reg == RISCV::X8) { SavedRegs.push_back(Reg); } } return SavedRegs; } LLVM,RISCV,243,4. if(MF.hasArguments()){ for(arg in MF.arguments()){ if(arg.passByRegister()){ frameLayout.frameOffset += frameLayout.registerSaveAreaSize } } } LLVM,RISCV,244, LLVM,RISCV,245,"void emitPrologue(MachineFunction &MF) { MachineBasicBlock &MBB = MF.front(); MachineBasicBlock::iterator MBBI = MBB.begin(); BuildMI(MBB, MBBI, DebugLoc(), TII.get(RISCV::ADDI), RISCV::X2) .addImm(RISCV::SP).addImm(0); BuildMI(MBB, MBBI, DebugLoc(), TII.get(RISCV::SW)) .addReg(RISCV::X1).addImm(0).addReg(RISCV::X2); BuildMI(MBB, MBBI, DebugLoc(), TII.get(RISCV::ADDI), RISCV::SP) .addReg(RISCV::SP).addImm(-16); BuildMI(MBB, MBBI, DebugLoc(), TII.get(RISCV::SD)) .addReg(RISCV::X2).addImm(-8).addReg(RISCV::X8); BuildMI(MBB, MBBI, DebugLoc(), TII.get(RISCV::ADDI), RISCV::X8) .addReg(RISCV::SP).addImm(-8); MF.getInfo()->setFramePointerSaveIndex(0); MF.getInfo()->setFramePointerSaveOffset(0); if(!MF.getInfo()->hasFramePointers()) { errs() << ""Shrink-wrapping not yet supported"" << ""\n""; errs() << ""emitPrologue doesn't support framepointer-less functions"" << ""\n""; return; } }" LLVM,RISCV,246,"unsigned getFrameIndexReference(int FrameIndex) { RISCV::Reg BaseReg = RISCV::X2; unsigned Offset = FrameIndex * 4; return MachineFrameInfo::getAddressingFrameIndexReference(BaseReg, Offset, RISCV::X2); }" LLVM,RISCV,247, LLVM,RISCV,248, LLVM,RISCV,249, LLVM,RISCV,250,"llvm::MachinePointerInfo getFrameIndexReference(const MachineFunction &MF, int FI, int &Offset) { const X86FrameLowering *TFL = MF.getSubtarget().getFrameLowering(); Offset = MF.getFrameInfo().getObjectOffset(FI); if (!TFL->hasFP(MF)) { return MachinePointerInfo(MF.getRegInfo().getStackRegister(), Offset); } Offset = TFL->getFrameIndexReferenceToFPOffset(MF, FI); return MachinePointerInfo(MF.getRegInfo().getFrameRegister(), Offset); }" LLVM,RISCV,251,void emitPrologue() { } void emitEpilogue() { } LLVM,RISCV,252, LLVM,RISCV,253, LLVM,RISCV,254, LLVM,RISCV,255, LLVM,RISCV,256, const RISCVInstrInfo &TII) { if (TII.usesCallFrameSetup(MF)) { MachineBasicBlock &MBB = MF.front(); MachineBasicBlock::iterator MBI = MBB.getFirstNonPHI(); while (MBI != MBB.end()) { MachineInstr &MI = *MBI; if (TII.isCallFrameSetup(MI) || TII.isCallFrameDestroy(MI)) { 
MBB.remove(MI); MBI++; } else { MBI++; } } } } LLVM,RISCV,257,"void emitEpilogue(Function &F) { MachineBasicBlock &MBB = F.back(); MachineFunction &MF = *MBB.getParent(); const RISCVInstrInfo *TII = MF.getSubtarget().getInstrInfo(); DebugLoc dl; const RISCVRegisterInfo *TRI = RISCV::RISCVRegisterInfo(); unsigned FP = TRI->getFrameRegister(MF); BuildMI(MBB, dl, TII->get(RISCV::ADDI), RISCV::X1).addReg(RISCV::X2).addImm(0).addMemOperand(MF.getMachineMemOperand(MF.getRoot(), MachineMemOperand::MOStore, 0, false)); BuildMI(MBB, dl, TII->get(RISCV::ADDI), RISCV::X1).addReg(RISCV::X3).addImm(0).addMemOperand(MF.getMachineMemOperand(MF.getRoot(), MachineMemOperand::MOStore, 0, false)); BuildMI(MBB, dl, TII->get(RISCV::ADDI), RISCV::X1).addReg(RISCV::X4).addImm(0).addMemOperand(MF.getMachineMemOperand(MF.getRoot(), MachineMemOperand::MOStore, 0, false)); if (MF.getFrameInfo().hasVarSizedObjects()) { BuildMI(MBB, dl, TII->get(RISCV::ADDI), FP).addReg(RISCV::SP).addImm(0).addMemOperand(MF.getMachineMemOperand(MF.getRoot(), MachineMemOperand::MOLoad, 0, false)); } else { llvm_unreachable(""Frame pointer should not have been eliminated""); }" LLVM,RISCV,258,"Function getFrameIndexReference(Offset, BaseReg, OffsetReg) { TargetSpecificValues = {RISCV, RISCV, RISCV, 0, 1, 0, 1, RISCV::X2, RISCVABI::getBPReg, RISCV::X2, 0, 0, 0} BaseReg = TargetSpecificValues[9] Offset = OffsetReg * 4 return {Offset, BaseReg} }" LLVM,RISCV,259,"bool hasFP(const llvm::Function &func) { if (func.getValueName() != ""RISCV,RISCV"") { return false; } if (func.hasFnAttribute(llvm::Attribute::FramePointer)) { return true; } return false; }" LLVM,RISCV,260, LLVM,RISCV,261, LLVM,RISCV,262,"bool restoreCalleeSavedRegisters(MachineFunction &MF) { bool DidChange = false; for (unsigned i = 0, e = MF.getRegInfo().getNumPhysRegs(); i != e; ++i) { if (!RISCV::GPRRegClass.contains(i)) continue; if (MF.getRegInfo().isCalleeSavedPhysReg(i)) { int FrameIndex = MF.getRegInfo().getFrameIndex(i); if (FrameIndex < 0) continue; if (!loadRegToStackSlot(MF, i, FrameIndex, RISCVII::MO_CALL)) { BuildMI(MF, MF.begin(), MF.findDebugLoc(MF.begin()), TII.get(RISCV::LW), i) .addFrameIndex(FrameIndex); DidChange = true; } } } return DidChange; }" LLVM,RISCV,263," const TargetRegisterInfo *TRI) const { if (CSI.empty()) { return false; } unsigned TotalSize = 0; for (const auto &CSE : CSI) { unsigned Reg = CSE.getReg(); const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); int FI = MFI->CreateFixedObject(RC->getSize(), 0, true); TotalSize += RC->getSize(); BuildMI(MBB, MI, DL, TII->get(RISCV::SW)) .addReg(Reg, RegState::Kill) .addFrameIndex(FI); MFI->setObjectOffset(FI, TotalSize); } return true; }" LLVM,RISCV,264,"void RISCVTargetMachine::runOnFunction(Function &F){} 2) In the function body, first retrieve the RISCV targeted value using the RISCVTargetMachine class and store it in a variable. const RISCVTargetLowering *RVTLI = getTargetLowering(); 3) Next, get the RISCV targeted value and store it in a variable. const RISCVInstrInfo *RISCVII = getInstrInfo(); 4) Then, get the RISCV targeted value and store it in a variable. const RISCVFrameLowering *RVFL = getFrameLowering(); 5) Now, use the targeted values to process the function as per the target specific requirements. For example, if the targeted value is ""Intrinsic::masked_gather"", the function can be processed using the masked gather instructions specific to RISCV architecture. 
6) Similarly, if the targeted value is ""Intrinsic::masked_scatter"", process the function using the masked scatter instructions specific to RISCV architecture. 7) After the function processing is completed, update the LLVM IR with the targeted instructions. 8) Finally, return the updated function. For example, the final implementation of the runOnFunction function will look like this: void RISCVTargetMachine::runOnFunction(Function &F){ const RISCVTargetLowering *RVTLI = getTargetLowering(); const RISCVInstrInfo *RISCVII = getInstrInfo(); const RISCVFrameLowering *RVFL = getFrameLowering(); if(TargetedValue == Intrinsic::masked_gather){ } else if(TargetedValue == Intrinsic::masked_scatter){ } LLVMContext &Ctx = F.getContext(); RISCVII->adjustInstrPostInstr(F, Ctx); return F; }" LLVM,RISCV,265,void RISCVBackend::runOnFunction(Function &F) { for (BasicBlock &BB : F) { for (Instruction &I : BB) { if (I.isIntrinsic() && (I.getIntrinsicID() == Intrinsic::masked_gather || I.getIntrinsicID() == Intrinsic::masked_scatter)) { I.setTargetSpecificValue(RISCV); } } } } LLVM,RISCV,266,void getAnalysisUsage(AnalysisUsage &AU) const override \{ AU.addRequired(); AU.addRequired(); AU.setPreservesAll(); } LLVM,RISCV,267,"std::string RISCVTargetMachine::getPassName() const { return ""RISCV Gather/Scatter Lowering""; }" LLVM,RISCV,268, LLVM,RISCV,269, LLVM,RISCV,270, LLVM,RISCV,271,"void dump() { errs() << ""Dump the plan to stderr ( for debugging )\n""; errs() << RISCV << '\n'; errs() << ""\n""; errs().flush(); }" LLVM,RISCV,272, LLVM,RISCV,273, LLVM,RISCV,274,"void intersect(set_a, set_b) { RISCVVSETVLIInfo::getUnknown(target_specific_value); for (auto i = set_a.begin(); i != set_a.end(); ++i) { if (set_b.find(*i) == set_b.end()) { *i = target_specific_value; } } set_b.clear(); }" LLVM,RISCV,275,"StringRef getPassName() { return ""RISCV instruction selection and scheduling pass""; }" LLVM,RISCV,276,"void intersect(VSETVLIInfo &set1, const VSETVLIInfo &set2) { for (unsigned i = 0; i < set1.size(); i++) { if (set2.contains(set1[i])) { set1[i] = VSETVLIInfo::getUnknown(); } } } void intersect(VSETVLIInfo &set1, const VSETVLIInfo &set2) { if (getTarget().getTriple().getArch() == Triple::RISCV) { intersect(set1, set2); } }" LLVM,RISCV,277,"bool isUnknown(const MemDepResult &result, RISCV::TargetValue val) { if (result.isQuery()) { if (result.getQueryType() == MemDepResult::Unknown) { if (result.getTargetValue() == val) { return true; } } } return false; }" LLVM,RISCV,278,bool isValid(RISCVTargetIterator it) { return it->isValid(); } LLVM,RISCV,279,"RISCVTargetMachine::merge(RISCVTargetMachine &TM1, RISCVTargetMachine &TM2) { if (TM1.getTriple().isRISCVTriple() && TM2.getTriple().isRISCVTriple()) { Triple mergedTriple = mergeTargetTriples(TM1.getTriple(), TM2.getTriple()); if (isValidVSETVLInfo(mergedTriple)) { RISCVTargetMachine mergedTM = new RISCVTargetMachine(mergedTriple); return mergedTM; } else { printError(""Can only merge with a valid VSETVLInfo""); return null; } } else { printError(""Invalid target machines""); return null; } } Triple mergeTargetTriples(Triple T1, Triple T2) { Triple mergedTriple; mergedTriple.setCPU(T1.getCPU() + T2.getCPU()); mergedTriple.setVendor(T1.getVendor() + T2.getVendor()); mergedTriple.setOS(T1.getOS() + T2.getOS()); return mergedTriple; } bool isValidVSETVLInfo(Triple triple) { string VSETVLInfo = triple.getVSETVLInfo(); if (VSETVLInfo.empty()) { return false; }" LLVM,RISCV,280,"void runOnMachineFunction(const MachineFunction &MF) { const RISCVTargetMachine &TM = 
static_cast(MF.getTarget()); const RISCVSubtarget &STI = MF.getSubtarget(); const RISCVFunctionInfo *FuncInfo = MF.getInfo(); const RISCVRegisterInfo *RegInfo = TM.getRegisterInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); MFI.setStackAlignment(RegInfo->getMinStackAlign()); emitPrologue(MF); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { emitInstruction(MBB, MI); } } emitEpilogue(MF); }" LLVM,RISCV,281,"void printInst(MCInst Inst, raw_ostream &OS, RISCV_MCInstrInfo const &MCII) { uint64_t InstBinary = Inst.getOpcode(); unsigned Opcode = MCII.get(InstBinary).Opcode; MCDisassembler::DecodeStatus Status; Status = MCDecoder.decodeInstruction(InstBinary, Inst, MCII); if (Status == MCDisassembler::Fail) { errs() << ""Error decoding instruction\n""; return; } MIPrinterMPR(*MCII.getPseudos(), *MCII.getAddlPostInstrComments()) .printInstruction(Inst, OS); }" LLVM,RISCV,282, LLVM,RISCV,283,"void printInst(const MCInst &inst, raw_ostream &OS, const MCAsmInfo *MAI) { StringRef OpcodeName = MAI->getOpcodeName(inst.getOpcode()); OS << OpcodeName << "" ""; unsigned NumOperands = inst.getNumOperands(); for (unsigned i = 0; i < NumOperands; ++i) { const MCOperand &Op = inst.getOperand(i); if(Op.isReg()) { OS << getRegisterName(Op.getReg()); } else if (Op.isImm()) { OS << Op.getImm(); } if (i != NumOperands - 1) { OS << "", ""; } } }" LLVM,RISCV,284, LLVM,RISCV,285,"void printInst(const MCInst &MI, raw_ostream &OS, const MCAsmInfo &MAI) { OS << MAI.getInstructionName(MI.getOpcode()) << "" ""; for (unsigned i = 0; i < MI.getNumOperands(); i++) { const MCOperand &MO = MI.getOperand(i); if (MO.isReg()) { OS << MAI.getRegName(MO.getReg()); } else if (MO.isImm()) { OS << MO.getImm(); } else if (MO.isExpr()) { OS << MO.getExpr()->getSymbol().getName(); } if (i < MI.getNumOperands() - 1) { OS << "", ""; } } OS << ""\n""; }" LLVM,RISCV,286,"void printInst(const MCInst *MI, raw_ostream &OS) { uint64_t Opcode = MI->getOpcode(); switch (Opcode) { #define RISCV_INST(name) \ case RISCV::name: \ print##name (MI, OS); \ break; RISCV_INST(ADD) RISCV_INST(AND) RISCV_INST(LB) RISCV_INST(LBU) RISCV_INST(LH) RISCV_INST(LHU) RISCV_INST(LW) RISCV_INST(LWU) RISCV_INST(MUL) RISCV_INST(OR) RISCV_INST(RET) RISCV_INST(SLL) RISCV_INST(SLT) RISCV_INST(SRA) RISCV_INST(SRL) RISCV_INST(SUB) RISCV_INST(XOR) RISCV_INST(PADD) RISCV_INST(PSUB) RISCV_INST(PSLL) RISCV_INST(PSRA) RISCV_INST(PSRL) default: MI->dump_pretty(OS); break; } }" LLVM,RISCV,287,"void printInst(const MCInst &Inst, raw_ostream &OS) { if (Inst.getOpcode() != RISCV::RISCV) { OS << ""Error: Invalid MCInst for RISCV target.""; return; } for (unsigned int i = 0; i < Inst.getNumOperands(); i++) { MCOperand Op = Inst.getOperand(i); OS << ""Operand "" << i << "": value = "" << Op.getImm() << "", type = "" << RISCV::getOperandTypeName(Op.getType()) << ""\n""; } if (Inst.hasExtraInfo()) { OS << ""Extra information: "" << Inst.getExtraInfo() << ""\n""; } OS << ""Opcode: "" << Inst.getOpcode() << ""\n""; }" LLVM,RISCV,288,"void printInst(const MCInst *MI, raw_ostream &OS, const MCSubtargetInfo &STI) { const MCInstrInfo *MCII = STI.getInstrInfo(); const MCRegisterInfo *MRI = STI.getRegisterInfo(); unsigned Opcode = MI->getOpcode(); const char *Mnemonic = MCII->getName(Opcode); OS << Mnemonic << "" ""; for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { const MCOperand &MO = MI->getOperand(i); if (MO.isReg()) { unsigned Reg = MO.getReg(); OS << MRI->getName(Reg) << "" ""; } else if (MO.isImm()) { int64_t Imm = MO.getImm(); OS << ""#"" << Imm << "" ""; } 
else if (MO.isExpr()) { const MCExpr *Expr = MO.getExpr(); Expr->print(OS); OS << "" ""; } } OS << ""RISCV,RISCV""; }" LLVM,RISCV,289,"void printInst(const MCInst &MI, raw_ostream &Out) { if (MI.getOpcode() == RISCV) { Out << MI.getOpcodeName() << "" ""; unsigned NumOperands = MI.getNumOperands(); for (unsigned i = 0; i < NumOperands; i++) { MCOperand Op = MI.getOperand(i); if (Op.isReg()) Out << Op.getReg(); else if (Op.isImm()) Out << Op.getImm(); else if (Op.isFPImm()) Out << Op.getFPImm(); else if (Op.isExpr()) Out << Op.getExpr(); if (i != NumOperands - 1) Out << "", ""; } } else { Out << ""Invalid MCInst for RISCV target.""; } }" LLVM,RISCV,290,"void applyTargetSpecificCLOption(llvm::TargetLowering::TargetLoweringOpt &Opts) { if (Opts.TM.getTargetTriple().getArch() == llvm::Triple::riscv) { Opts.TM.OptionsABIName = ""RISCV""; Opts.TM.OptionsCPUName = ""RISCV""; Opts.TM.OptionsNoAliases = true; Opts.TM.OptionsReallyRetainParens = llvm::ReallyRetainParensEnum::Numeric; } }" LLVM,RISCV,291,"void printInst(const MCInst &Inst, raw_ostream &OS) { const Target &TheTarget = getTarget(); OS << TheTarget.getName() << ""::"" << Inst.getOpcode() << "" ""; for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i) { Inst.getOperand(i).print(OS); if (i != e-1) OS << "", ""; } OS << ""\n""; }" LLVM,RISCV,292,"void adjustStackPtr(int Amount, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL) { Register SP = RISCV::X2; Amount += 4; if (Amount < 4096) { BuildMI(MBB, MI, DL, TII.get(RISCV::ADDI), SP) .addReg(SP).addImm(Amount); } else { BuildMI(MBB, MI, DL, TII.get(RISCV::ADD), SP) .addReg(SP).addReg(RISCV::X0).addImm(Amount); } }" LLVM,RISCV,293,"bool expandPostRAPseudo(MachineInstr &MI) { switch (MI.getOpcode()) { default: return false; case RISCV::PseudoCopy: { unsigned DstReg = MI.getOperand(0).getReg(); unsigned SrcReg = MI.getOperand(1).getReg(); BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(RISCV::MV)).addReg(DstReg).addReg(SrcReg); MI.removeFromParent(); return true; } case RISCV::PseudoMoveImm: { unsigned DstReg = MI.getOperand(0).getReg(); int64_t Imm = MI.getOperand(1).getImm(); int64_t LoImm = Imm & 0xFFFFFFFF; int64_t HiImm = (Imm >> 32) & 0xFFFFFFFF; BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(RISCV::LI)).addReg(DstReg).addImm(LoImm); if (HiImm != 0) { BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(RISCV::ADDI)).addReg(DstReg).addReg(DstReg) .addImm(HiImm); } MI.removeFromParent(); return true; } } }" LLVM,RISCV,294,bool isBranch(RISCV::LoadInst *Inst){ switch(Inst->Opcode){ case RISCV::J: case RISCV::J64: case RISCV::JAL: case RISCV::JAL64: case RISCV::JALR: case RISCV::JALR64: case RISCV::BEQ: case RISCV::BEQ64: case RISCV::BNE: case RISCV::BNE64: case RISCV::BLT: case RISCV::BLT64: case RISCV::BLTU: case RISCV::BLTU64: case RISCV::BGE: case RISCV::BGE64: case RISCV::BGEU: case RISCV::BGEU64: case RISCV::BGT: case RISCV::BGT64: case RISCV::BGTU: case RISCV::BGTU64: case RISCV::BLE: case RISCV::BLE64: case RISCV::BLEU: case RISCV::BLEU64: return true; default: return false; } } LLVM,RISCV,295,"bool isLoadFromStackSlot(MachineInstr *MI, unsigned int &DestReg, int &FrameIndex) { unsigned int Opcode = MI->getOpcode(); if (Opcode == RISCV::LW || Opcode == RISCV::LWU || Opcode == RISCV::LD) { if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm()) { FrameIndex = MI->getOperand(1).getIndex(); DestReg = MI->getOperand(0).getReg(); return true; } } return false; }" LLVM,RISCV,296,"unsigned int isStoreToStackSlot(MachineInstr *MI) { 
assert(MI->isStore() && ""Expected a store instruction""); unsigned int RegNum, FrameIndex; RISCVII::SimpleStore STInst = RISCVII::SimpleStore(MI->getOpcode()); if (MI->getOperand(STInst.BaseOperand).getTargetFlags() == STInst.RegisterFlag && MI->getOperand(STInst.OffsetOperand).getTargetFlags() == STInst.OffsetFlag) { RegNum = MI->getOperand(STInst.BaseOperand).getReg(); FrameIndex = MI->getOperand(STInst.OffsetOperand).getIndex(); return RegNum, FrameIndex; } return 0; }" LLVM,RISCV,297, LLVM,RISCV,298, LLVM,RISCV,299,"void storeRegToStackSlot(Register reg, int stackIndex) { assert(reg.isAllocatable() && ""Only allocatable registers can be stored to stack!""); MachineBasicBlock &MBB = *MI.getParent(); MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = *MF.getFrameInfo(); const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(reg); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const MCRegisterInfo *MRI = MF.getMMI().getContext().getRegisterInfo(); const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(reg); DebugLoc DL = MI->getDebugLoc(); MachineInstr *NewMI = BuildMI(MF, DL, TII->get(RISCV::SW)) .addReg(reg, getDefRegState(MI->getOperand(MI->getDesc() .getOperandConstraint(MI->getNumOperands()-1, MCOI::OperandConstraint(CHARM, RC))))) .addFrameIndex(stackIndex); return NewMI; }" LLVM,RISCV,300, bool AllowModify) { if (!isValidTerminatorOp(TerminatorOp)) { return true; } return false; } LLVM,RISCV,301, LLVM,RISCV,302, LLVM,RISCV,303,"void convertToThreeAddress(MachineInstr *MI) { switch (MI->getOpcode()) { case RISCV::ADD: case RISCV::SUB: case RISCV::MUL: case RISCV::DIV: const MachineOperand &Dest = MI->getOperand(0); const MachineOperand &Op1 = MI->getOperand(1); const MachineOperand &Op2 = MI->getOperand(2); MachineInstrBuilder MIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(MI->getOpcode())) .add(Dest) .add(Op1) .add(Op2); MI->eraseFromParent(); MI = MIB; break; default: report_fatal_error(""Unexpected opcode""); } MI->addOperand(MachineOperand::CreateImm(0)); MI->addOperand(MachineOperand::CreateImm(1)); MI->addOperand(MachineOperand::CreateImm(2)); MI->addOperand(MachineOperand::CreateImm(3)); MI->addOperand(MachineOperand::CreateImm(4)); MI->addOperand(MachineOperand::CreateImm(1)); }" LLVM,RISCV,304,"void decomposeMachineOperandsTargetFlags(const MachineOperand &MO, unsigned int &DirectTargetFlag, unsigned int &BitFlags) { unsigned int DirectMask = RISCV::MO_DIRECT_FLAG_MASK; DirectTargetFlag = MO.getTargetFlags() & DirectMask; BitFlags = MO.getTargetFlags() & ~DirectMask; }" LLVM,RISCV,305, LLVM,RISCV,306, LLVM,RISCV,307,"RISCV::RegisterClass getT0Class(MVT::SimpleValueType VT) { if (VT == MVT::i32) return RISCV::GPRRegClass; return RISCV::NoRegisterClass; } RISCV::RegisterClass getT1Class(MVT::SimpleValueType VT) { if (VT == MVT::i32) return RISCV::GPRNoZeroRegClass; return RISCV::NoRegisterClass; } namespace { class RISCVInstrInfo : public TargetInstrInfo { const RISCVSubtarget &STI; virtual const RISCVRegisterBankInfo *getRegBankInfo() const { return &RISCV::RISCVRegBankInfo; } virtual const RISCVGenInstrInfo &getRISCVInfo() const { return *Subtarget.getInstrInfo(); } public: explicit RISCVInstrInfo(const RISCVSubtarget &st) : STI(st) {} void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const; bool expandPostRAPseudo(MachineInstr *MI) const override; bool canInsertBundleAtEndOfBlock( const MachineBasicBlock &) const override { 
return true; } bool shouldScheduleLoadsNear(SDNode *, int, unsigned) const override { return true; } bool isSchedulingBoundary(const MachineInstr *MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override; }; } bool RISCV::RISCVInstrInfo::isSchedulingBoundary(const MachineInstr *MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const { if (MI->isConditionalBranch() || MI->isUnconditionalBranch() || MI->isBarrier(MF)) { return true; } return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF); }" LLVM,RISCV,308,"OutlinedFunction getOutliningCandidateInfo(vector candidates) { OutlinedFunction outlinedFunc; outlinedFunc.target = Triple(""riscv""); outlinedFunc.arch = ""RISCV""; outlinedFunc.machine = ""RISCV""; outlinedFunc.regSize = 32; outlinedFunc.X5 = 2; outlinedFunc.start = 0; outlinedFunc.end = 0; outlinedFunc.offset = 8; outlinedFunc.stride = 4; outlinedFunc.mask = 0; outlinedFunc.feature = Feature::StdExtC; outlinedFunc.featureLevel = 2; return outlinedFunc; }" LLVM,RISCV,309, LLVM,RISCV,310,"void getRegisterInfo(TargetInstrInfo &TII, RISCVRegisterInfo &RII){ if(!isa(TII)){ return; } RISCVTargetInstrInfo* RTII = cast(TII); RTII->getRegisterInfo(RII); }" LLVM,RISCV,311,"std::vector> getSerializableDirectMachineOperandTargetFlags() { std::vector> TargetFlags; TargetFlags.emplace_back(""riscv-call"", RISCV); TargetFlags.emplace_back(""riscv-plt"", RISCV); TargetFlags.emplace_back(""riscv-lo"", RISCV); TargetFlags.emplace_back(""riscv-hi"", RISCV); TargetFlags.emplace_back(""riscv-pcrel-lo"", RISCV); TargetFlags.emplace_back(""riscv-pcrel-hi"", RISCV); TargetFlags.emplace_back(""riscv-got-hi"", RISCV); TargetFlags.emplace_back(""riscv-tprel-lo"", RISCV); TargetFlags.emplace_back(""riscv-tprel-hi"", RISCV); TargetFlags.emplace_back(""riscv-tprel-add"", RISCV); TargetFlags.emplace_back(""riscv-tls-got-hi"", RISCV); TargetFlags.emplace_back(""riscv-tls-gd-hi"", RISCV); return TargetFlags; }" LLVM,RISCV,312, LLVM,RISCV,313, LLVM,RISCV,314,"void insertOutlinedCall(Module &M, Function *OutlinedFunc) { const TargetRegisterClass *RC = &RISCV::GPRRegClass; const TargetRegisterClass *ArgRC = &RISCV::GPRRegClass; unsigned ArgRegSize = ArgRC->getSize(); unsigned ArgNum = 1; unsigned CallOp = RISCVII::MO_CALL; unsigned CallReg = RISCV::PseudoCALLReg; unsigned OutlinedArgReg = RISCV::X5; BasicBlock *BB = BasicBlock::Create(OutlinedFunc->getContext(), ""call"", OutlinedFunc->begin()); IRBuilder<> Builder(BB); Type *FuncType = FunctionType::get(Type::getVoidTy(M.getContext()), false)->getPointerTo(); Value *FuncArg = OutlinedFunc->arg_begin(); Builder.CreateStore(FuncArg, Builder.CreateIntToPtr(ConstantInt::get(Type::getInt64Ty(M.getContext()), 0), ArgRC->getRegister(RandomChoiceInMask(*ArgRC->getRawAllocationOrder(ArrayRef(), M.getDataLayout())), ArgNum), ""X5Store"")); Builder.CreateCall(getCorrespondingFunction(FuncType, M)->getFunctionType(), FuncType, Builder.CreateIntToPtr(ConstantInt::get(Type::getInt64Ty(M.getContext()), 0), RC->getRegister(RandomChoiceInMask(*RC->getRawAllocationOrder(ArrayRef(), M.getDataLayout())), FuncArg->getName()+"".0"", CallReg), ""CALL"", CallOp)); }" LLVM,RISCV,315, LLVM,RISCV,316, LLVM,RISCV,317,"bool isCopyInstrImpl(unsigned Opcode, const MachineOperand &Dest, const MachineOperand &Src) { if (Opcode == RISCV::ADDI || Opcode == RISCV::FSGNJ_D || Opcode == RISCV::FSGNJ_S) { if (Dest.isReg() && Src.isReg()) { return true; } } return false; }" LLVM,RISCV,318,"bool isFunctionSafeToOutlineFrom(Function *F, const RISCV::TargetMachine &TM) { if 
(!F) return false; for (auto &BB : *F) { for (auto &I : BB) { if (auto CS = CallSite(&I)) { if (Function *Callee = CS.getCalledFunction()) { if (Callee->isDeclaration() && !Callee->isIntrinsic()) { return false; } } } } } const TargetFeatures &Features = TM.getSubtargetImpl()->getTargetTriple().getFeatureString(); if (Features.count(""RISCV"") > 0) { return false; } for (auto &BB : *F) { for (auto &I : BB) { if (auto *CI = dyn_cast(&I)) { if (auto *IA = dyn_cast(CI->getCalledOperand())) { return false; } } } } return true; }" LLVM,RISCV,319,"`bool RISCVTargetLowering::isLoadFromStackSlot(const MachineInstr *MI, int &DestReg, int &FrameIndex) const {` ` `if (MI->getOpcode() == RISCV::LB || MI->getOpcode() == RISCV::LBU || MI->getOpcode() == RISCV::LH || MI->getOpcode() == RISCV::LHU || MI->getOpcode() == RISCV::FLH || MI->getOpcode() == RISCV::LW || MI->getOpcode() == RISCV::FLW || MI->getOpcode() == RISCV::LWU || MI->getOpcode() == RISCV::LD || MI->getOpcode() == RISCV::FLD) {` const MachineOperand &DestMO = MI->getOperand(0); if (DestMO.isReg()) { DestReg = DestMO.getReg(); const MachineOperand &StackMO = MI->getOperand(1); if (StackMO.isFI()) { FrameIndex = StackMO.getIndex(); return true; } } } return false; }" LLVM,RISCV,320,"bool isMBBSafeToOutlineFrom(const MachineBasicBlock &MBB, TargetMachine &TM, const TargetRegisterInfo *TRI, unsigned Flags) { if (TM.getTargetTriple().str().contains(""riscv"")) { if (MBB.isReturnBlock()) { return false; } if (MBB.isEntryBlock()) { return true; } } return false; }" LLVM,RISCV,321," int &SrcReg, int &FrameIndex) const { assert(MI && ""Invalid machine instruction""); assert((MI->getOpcode() == RISCV::SW || MI->getOpcode() == RISCV::SD) && ""Expected store instruction""); const MachineOperand &MO = MI->getOperand(1); if (MO.isFI()) { FrameIndex = MO.getIndex(); SrcReg = MI->getOperand(0).getReg(); return true; } return false; }" LLVM,RISCV,322,void removeBranch(MachineBasicBlock &MBB) { MachineInstr *LastInstr = MBB.getInstrList().back(); if (!LastInstr->isBranch()) return; unsigned Opcode = LastInstr->getOpcode(); unsigned DescSize = LastInstr->getDesc().getSize(); unsigned SEImmOffset = DescSize - 8; unsigned BaseOpcode = LastInstr->getOperand(SEImmOffset).getImm(); unsigned SameBranchOpcode = LastInstr->getOperand(SEImmOffset + 1).getImm(); unsigned OppositeBranchOpcode = LastInstr->getOperand(SEImmOffset + 2).getImm(); if (Opcode != RISCV::SELECT_CC || DescSize != 32 || BaseOpcode != RISCV || SameBranchOpcode != 0 || OppositeBranchOpcode != 0) return; MBB.erase(MBB.end()); } LLVM,RISCV,323,"bool reverseBranchCondition(RISCVCC::CondCode CC, std::vector &Cond) { if (CC == RISCVCC::CondCode::INVALID) { return true; } if (Cond.empty() || Cond[0].isImm() || Cond[0].getImm() != 0) { return true; } Cond[0].setImm(1 - Cond[0].getImm()); return false; }" LLVM,RISCV,324,"void verifyInstruction(const MachineInstr *MI, const MCInstrDesc &Desc, const MCInstrInfo *MCII, float RISCVImm) { if (!MI->isRISCVMachineInstr()) { errs() << ""Unexpected instruction type: "" << *MI << '\n'; return; } unsigned NumOperands = Desc.getNumOperands(); for (unsigned i = 0; i < NumOperands; ++i) { const MachineOperand &MO = MI->getOperand(i); unsigned OpType = Desc.OpInfo[i].OperandType; switch (OpType) { case RISCVOp::OPERAND_FIRST_RISCV_IMM: if (MO.getImm() != RISCVImm) { errs() << ""Unexpected operand type: "" << *MI << '\n'; return; } break; case RISCVOp::OPERAND_LAST_RISCV_IMM: if (MO.getImm() != RISCVImm) { errs() << ""Unexpected operand type: "" << *MI << '\n'; return; } 
break; case RISCVOp::OPERAND_UIMM2: if (MO.getImm() < 0 || MO.getImm() > 2) { errs() << ""Invalid immediate: "" << *MI << '\n'; return; }" LLVM,RISCV,325,"namespace { class CopyPhysRegPass : public MachineFunctionPass { public: static char ID; CopyPhysRegPass() : MachineFunctionPass(ID) {} bool runOnMachineFunction(MachineFunction &MF) override; const char *getPassName() const override { return ""RISCV Copy Phys Reg Pass""; } private: void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DstReg, unsigned SrcReg, bool KillSrc) const; }; } char CopyPhysRegPass::ID = 0; bool CopyPhysRegPass::runOnMachineFunction(MachineFunction &MF) { const RISCVRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); bool Modified = false; for (auto &MBB : MF) { for (auto MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) { if (!MBBI->isCopy()) continue; unsigned DstReg = MBBI->getOperand(0).getReg(); unsigned SrcReg = MBBI->getOperand(1).getReg(); if (TRI->isPhysicalRegister(DstReg) && TRI->isPhysicalRegister(SrcReg)) copyPhysReg(MBB, MBBI, MBBI->getDebugLoc(), DstReg, SrcReg, MBBI->getOperand(1).isKill()); Modified = true; } } return Modified; }" LLVM,RISCV,326,unsigned int getInstSizeInBytes(const MachineInstr *MI) { switch (MI->getOpcode()) { default: return ~0U; case RISCV::PseudoCALLReg: case RISCV::PseudoCALL: case RISCV::PseudoJump: case RISCV::PseudoTAIL: case RISCV::PseudoLLA: case RISCV::PseudoLA: case RISCV::PseudoLA_TLS_IE: case RISCV::PseudoLA_TLS_GD: return 8; case RISCV::PseudoAtomicLoadNand32: case RISCV::PseudoAtomicLoadNand64: return 20; case RISCV::PseudoMaskedAtomicSwap32: case RISCV::PseudoMaskedAtomicLoadAdd32: case RISCV::PseudoMaskedAtomicLoadSub32: case RISCV::PseudoMaskedAtomicLoadNand32: case RISCV::PseudoMaskedAtomicLoadMax32: case RISCV::PseudoMaskedAtomicLoadMin32: case RISCV::PseudoMaskedAtomicLoadUMax32: case RISCV::PseudoMaskedAtomicLoadUMin32: case RISCV::PseudoCmpXchg32: case RISCV::PseudoCmpXchg64: case RISCV::PseudoMaskedCmpXchg32: return 28; } } LLVM,RISCV,327,OutlinedFunction getOutliningCandidateInfo() { OutlinedFunction outlinedFunc; outlinedFunc.arch = RISCV; outlinedFunc.archVariant = RISCV; outlinedFunc.registers = { RISCV::X5 }; outlinedFunc.noOfArguments = 2; outlinedFunc.noOfReturnValues = 0; outlinedFunc.minStackSize = 0; outlinedFunc.maxStackSize = 8; outlinedFunc.stackAlignment = 4; outlinedFunc.hasStdExtCFeatures = true; outlinedFunc.stdExtCVersion = 2; return outlinedFunc; } LLVM,RISCV,328,switch (outliningType) { case 0: break; case 1: break; case 2: break; default: break; } LLVM,RISCV,329, LLVM,RISCV,330,"bool insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock *NewDestBB) { const RISCVSubtarget &Subtarget = MBB.getParent()->getSubtarget(); if (!Subtarget.hasLongBranches()) { errs() << ""Unable to insert indirect branch: Branch offsets outside of the signed 32-bit range not supported for this target.\n""; return false; }" LLVM,RISCV,331,bool isAsCheapAsAMove(MachineInstr &MI) { unsigned Opcode = MI.getOpcode(); if (Opcode == RISCV::ADDI || Opcode == RISCV::ORI || Opcode == RISCV::XORI) { if (MI.getNumOperands() == 2) { if (MI.getOperand(0).isReg() && MI.getOperand(1).isReg() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) { if (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 1) { if (MI.getOperand(0).getReg() == RISCV::X0) { return true; } } } } } return false; } LLVM,RISCV,332," int &SrcReg) const { int Opcode = MI->getOpcode(); if (Opcode == RISCV::SB || Opcode == 
RISCV::SH || Opcode == RISCV::SW || Opcode == RISCV::FSW || Opcode == RISCV::SD || Opcode == RISCV::FSD) { SrcReg = MI->getOperand(0).getReg(); if (TargetRegisterInfo::isVirtualRegister(SrcReg)) SrcReg = TRI.getPhysicalRegister(SrcReg, RC); MachineMemOperand *MemOp = *MI->memoperands_begin(); if (MemOp->getValue() && isa(MemOp->getValue())) { FrameIndex = cast(MemOp->getValue())->getIndex(); return 1; } } return 0; }" LLVM,RISCV,333, LLVM,RISCV,334, LLVM,RISCV,335,"void verifyInstruction(const MachineInstr *MI) { if(MI->getOpcode() == RISCV::RISCV ){ unsigned Opc = MI->getOperand(0).getReg(); unsigned DestReg = MI->getOperand(1).getReg(); unsigned SrcReg1 = MI->getOperand(2).getReg(); unsigned SrcReg2 = MI->getOperand(3).getReg(); if(MI->getOperand(4).getImm() != RISCVOp::OPERAND_FIRST_RISCV_IMM || MI->getOperand(5).getImm() != RISCVOp::OPERAND_LAST_RISCV_IMM){ report_fatal_error(""Unexpected operand type""); } for(unsigned i = 6; i < MI->getNumOperands(); i++){ if(MI->getOperand(i).isImm()){ unsigned Imm = MI->getOperand(i).getImm(); switch (MI->getOperand(i).getType()){ case RISCVOp::OPERAND_UIMM4: if(Imm > 4){ report_fatal_error(""Invalid immediate""); } break; case RISCVOp::OPERAND_UIMM5: if(Imm > 5){ report_fatal_error(""Invalid immediate""); } break; case RISCVOp::OPERAND_UIMM12: if(Imm > 12){ report_fatal_error(""Invalid immediate""); } break; case RISCVOp::OPERAND_SIMM12: if(Imm > 12){ report_fatal_error(""Invalid immediate""); } break; case RISCVOp::OPERAND_SIMM13_LSB0: if(Imm > 12 || Imm % 2 != 0){ report_fatal_error(""Invalid immediate""); }" LLVM,RISCV,336,"static void convertToThreeAddress(const Instruction *I, MachineInstrBuilder &MIB) { if(!(I->getDesc().TSFlags & M_CONVERTIBLE_TO_3_ADDR)) { LLVMContext &Ctx = I->getContext(); Ctx.emitError(""Unexpected opcode""); return; } const MachineOperand *OP1 = I->getOperand(0); const MachineOperand *OP2 = I->getOperand(1); const MachineOperand *OP3 = I->getOperand(2); MIB.addReg(OP1->getReg()); MIB.addReg(OP2->getReg()); MIB.addReg(OP3->getReg()); if(I->getNumOperands() > 3) { for(int i = 3; i < I->getNumOperands(); i++) { const MachineOperand *OP = I->getOperand(i); MIB.addReg(OP->getReg()); } } } " LLVM,RISCV,337,"bool verifyInstruction(const RISCVInst *Inst) { unsigned Opcode = Inst->getOpcode(); if (Opcode == RISCV::Inst) { unsigned NumOps = Inst->getNumOperands(); for (unsigned i = 0; i < NumOps; i++) { unsigned OpType = Inst->getOperand(i)->getType(); if (OpType == RISCVOp::OPERAND_FIRST_RISCV_IMM || OpType == RISCVOp::OPERAND_LAST_RISCV_IMM) { Value *ImmVal = Inst->getOperand(i); if (!isa(ImmVal) && !isa(ImmVal)) { errs() << ""Operand type of RISCV immediate is not valid""; return false; } } else if (OpType == RISCVOp::OPERAND_UIMM4 || OpType == RISCVOp::OPERAND_UIMM5 || OpType == RISCVOp::OPERAND_UIMM12 || OpType == RISCVOp::OPERAND_SIMM12 || OpType == RISCVOp::OPERAND_UIMM20 || OpType == RISCVOp::OPERAND_UIMMLOG2XLEN) { Value *ImmVal = Inst->getOperand(i); if (!isa(ImmVal)) { errs() << ""Operand type of RISCV immediate is not valid""; return false; }" LLVM,RISCV,338,"bool LLVMRISCVTargetMachine::isMBBSafeToOutlineFrom(MachineBasicBlock *MBB, LLVMRISCVSubtarget &STI, unsigned Flags) const { if(!MBB) return false; if(MBB->isEHPad()) return false; if(MBB->isEntryBlock() || MBB->isReturnBlock()) return false; if(MBB->hasInlineAsm()) return false; if(MBB->succ_empty()) return false; if(MBB->pred_empty()) return false; if(MBB->getFirstTerminator() == MBB->end()) return false; for(MachineBasicBlock::iterator MI = MBB->begin(), ME = 
MBB->end(); MI != ME; ++MI) { if(MI->isPHI()) return false; } for(MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end(); MI != ME; ++MI) { if(MI->isCall()) return false; } return true; }" LLVM,RISCV,339,"void convertToThreeAddress(MachineInstr &MI) { assert(MI.isConvertableToThreeAddr() && ""Unexpected opcode for RISCV backend.""); MachineBasicBlock *MBB = MI.getParent(); DebugLoc DL = MI.getDebugLoc(); MachineFunction *MF = MBB->getParent(); const RISCVSubtarget &STI = MF->getSubtarget(); unsigned NumOps = MI.getDesc().getNumOperands(); unsigned RISCV = MI.getOperand(0).getReg(); unsigned RISCV = MI.getOperand(1).getReg(); unsigned Immediate = MI.getOperand(2).getImm(); unsigned Operand1 = MI.getOperand(3).getReg(); unsigned Operand2 = MI.getOperand(4).getReg(); unsigned Result = MI.getOperand(5).getReg(); unsigned Cond = MI.getOperand(6).getImm(); unsigned PredReg = MI.getOperand(7).getReg(); int FPFlag = MI.getOperand(8).getImm(); unsigned Modifier = MI.getOperand(9).getReg(); MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, MI.getDesc()) .addReg(RISCV).addReg(RISCV).addImm(Immediate).addReg(Operand1) .addReg(Operand2).addReg(Result).addImm(Cond).addReg(PredReg) .addImm(FPFlag).addReg(Modifier); for (unsigned i = NumOps; i != 10; ++i) MIB.add(MI.getOperand(i)); MBB->erase(MI); }" LLVM,RISCV,340,"function copyPhysReg(srcReg, dstReg) { if (srcReg.architecture != RISCV || dstReg.architecture != RISCV) { throw error(""Invalid target specific value.""); } type = branchAnalysis(srcReg); if (type == physicalRegister) { dstReg.value = srcReg.value; } else { srcReg = resolveVirtualReg(srcReg); copyPhysReg(srcReg, dstReg); } dstReg.branchInstruction = new PhysicalRegister(srcReg.index); return success; }" LLVM,RISCV,341,"void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB) { unsigned regScavengerRequired = true; unsigned newBlockExpanding = true; unsigned branchOffsetLimit = 32; if (MBB.getParent()->getTarget().getTargetTriple().getArch() != Triple::RISCV) { report_fatal_error(""Indirect branch insertion supported only for RISCV backend""); } if (regScavengerRequired && !MBB.getParent()->getSubtarget().enableRegScavenging()) { report_fatal_error(""RegScavenger not supported for long branching""); } " LLVM,RISCV,342,unsigned getInstSizeInBytes(const MachineInstr &MI) { const MCInstrDesc &Desc = MI.getDesc(); if (!Desc.isPseudo()) return Desc.getSize(); switch (MI.getOpcode()) { case RISCV::PseudoCALL: return 8; default: return ~0U; } } LLVM,RISCV,343, LLVM,RISCV,344,"LLVMValueRef loadRegFromStackSlot(LLVMBuilderRef Builder, LLVMValueRef RISCV, int index, LLVMTypeRef type) { RISCV::RegClass regClass; if (type == RISCV::GPR32RegClass) { regClass = RISCV::GPR32RegClass; } else if (type == RISCV::FPR32RegClass) { regClass = RISCV::FPR32RegClass; } else if (type == RISCV::FPR64RegClass) { regClass = RISCV::FPR64RegClass; } int opcode; if (regClass == RISCV::GPR32RegClass) { opcode = RISCV::LW; } else if (regClass == RISCV::FPR32RegClass) { opcode = RISCV::FLW; } else if (regClass == RISCV::FPR64RegClass) { opcode = RISCV::FLW; } LLVMValueRef loadedReg = LLVMBuildLoad(Builder, type, LLVMBuildIntToPtr(Builder, LLVMConstInt(LLVMInt32Type(), index, 0), type, ""index""), ""loadedReg""); if (loadedReg == NULL) { LLVMValueRef errMsg = LLVMBuildGlobalStringPtr(Builder, ""Can't load this register from stack slot""); LLVMValueRef zero = LLVMConstInt(LLVMInt32Type(), 0, 0); LLVMValueRef gepArgs[] = { zero, zero }; LLVMValueRef regLoadErrMsg = LLVMBuildGEP(Builder, errMsg, 
gepArgs, sizeof(gepArgs)/sizeof(gepArgs[0]), ""regLoadErrMsg""); LLVMBuildCall(Builder, LLVMGetNamedFunction(RISCV, ""printf""), &regLoadErrMsg, 1, ""printf""); return 0; }" LLVM,RISCV,345," const TargetRegisterInfo *TRI, unsigned &SavedRegsSize) { if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) { report_fatal_error(""Can't store this register to stack slot""); }" LLVM,RISCV,346, LLVM,RISCV,347,"unsigned getInstSizeInBytes(const MachineInstr *MI) { assert(MI && ""Invalid MachineInstr provided!""); const MCInstrDesc &MID = MI->getDesc(); return MID.getSize(); }" LLVM,RISCV,348," const TargetRegisterInfo *TRI) const { DEBUG(dbgs() << ""Inserting "" << printReg(DestReg, TRI) << "" = LOAD STACK "" << printRegClass(RC, TRI) << ""\n""); Register FrameReg = RISCV::X2; unsigned LdOp = RISCV::LW; MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), get(LdOp), DestReg) .addFrameIndex(FI) .addReg(FrameReg) .addImm(0); if (RC == &RISCV::GPRRegClass) { MIB->getOperand(0).setIsDead(); MIB.addReg(FrameReg, RegState::ImplicitDefine); } else { MIB->getOperand(0).setIsDef(); } }" LLVM,RISCV,349," const TargetInstrInfo *TII) { DebugLoc DL = MI->getDebugLoc(); const RISCVInstrInfo *RIII = static_cast<const RISCVInstrInfo *>(TII); if (RIII->isIntRegsRegisterClass(RC)) { uint64_t RegMark = RISCV::GPRRegClass.getRegister((unsigned)SrcReg); assert(RegMark && ""Attempt to store an unallocatable register.""); unsigned Size = (unsigned)RIII->getRegSizeInBits(RISCV::GPRRegClass) / 8; while (Size) { unsigned SubRegs = Size > 4 ? 4 : Size; RISCV::SubReg s; switch(SubRegs) { case 1: if (RISCV::GRsub_8bit.getSubClassMask() & RegMark) s = RISCV::GRsub_8bit; else s = (RISCV::SubReg)0; break; case 2: if (RISCV::GRsub_16bit.getSubClassMask() & RegMark) s = RISCV::GRsub_16bit; else s = (RISCV::SubReg)0; break; case 4: if (RISCV::GRsub_32bit.getSubClassMask() & RegMark) s = RISCV::GRsub_32bit; else s = (RISCV::SubReg)0; break; default: llvm_unreachable(""Attempted to allocate impossible sub-register""); } uint64_t RealReg = RISCV::GPRRegClass.getSubClassMask() & RegMark & s.getSubClassMask(); if (!RealReg) { llvm_unreachable(""Attempted to allocate unreferenced subregister for physical register.""); continue; }" LLVM,RISCV,350,"unsigned int getInstSizeInBytes(const MachineInstr *MI) { if (!MI) return 0; const MachineBasicBlock *MBB = MI->getParent(); assert(MBB && ""Instruction does not belong to a basic block!""); const MachineFunction *MF = MBB->getParent(); assert(MF && ""Machine Basic Block does not belong to a Machine Function!""); const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); assert(TII && ""Target Instruction Info not found in Machine Function Subtarget!""); const MCInstrDesc &MCID = MI->getDesc(); if (MCID.isCall()) { if (MI->getOpcode() == RISCV::PseudoCALL || MI->getOpcode() == RISCV::PseudoTAIL) { return 4; } if (MI->getOpcode() == RISCV::PseudoCALLReg || MI->getOpcode() == RISCV::PseudoLLA || MI->getOpcode() == RISCV::PseudoLA || MI->getOpcode() == RISCV::PseudoLA_TLS_IE || MI->getOpcode() == RISCV::PseudoLA_TLS_GD) { return 8; } } return TII->getInstSizeInBytes(MI->getOpcode()); }" LLVM,RISCV,351," const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { RISCVSubtarget Subtarget = MF.getSubtarget(); const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); const RISCVInstrInfo *TII = Subtarget.getInstrInfo(); unsigned DestPhysReg = TRI->getEncodingValue(DestReg); unsigned SrcPhysReg = TRI->getEncodingValue(SrcReg); if (DestPhysReg == 0 || SrcPhysReg == 0) { report_fatal_error(""Invalid 
register for copyPhysReg""); } const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(DestReg); if (RC != TRI->getMinimalPhysRegClass(SrcReg)) { report_fatal_error(""Invalid register class for copyPhysReg""); }" LLVM,RISCV,352, LLVM,RISCV,353, LLVM,RISCV,354,"void commuteInstructionImpl(SDNode *Node, SelectionDAG *DAG, MachineInstr *MI, const TargetInstrInfo *TII, MachineRegisterInfo *MRI) { unsigned Opcode = MI->getOpcode(); if (Opcode == RV::ADD || Opcode == RV::SUB || Opcode == RV::MUL) { SDValue Op0 = MI->getOperand(0); SDValue Op1 = MI->getOperand(1); if (Op0.isReg() && Op1.isReg()) { MI->getOperand(0).setReg(Op1.getReg()); MI->getOperand(1).setReg(Op0.getReg()); } switch (Opcode) { case RV::ADD: MI->setDesc(TII->get(RV::ADDI)); break; case RV::SUB: MI->setDesc(TII->get(RV::SUBI)); break; case RV::MUL: MI->setDesc(TII->get(RV::MULI)); break; } } }" LLVM,RISCV,355," MachineFunction &MF) { if (!(MI->getDesc().TSFlags & RISCV::M_CONVERTIBLE_TO_3_ADDR)) return; unsigned Opc = MI->getOpcode(); unsigned IntData = MI->getOperand(0).getImm(); unsigned RegData = MI->getOperand(1).getReg(); unsigned BaseData = MI->getOperand(2).getReg(); unsigned IndexData = MI->getOperand(3).getImm(); unsigned ShiftAmtData = MI->getOperand(4).getImm(); unsigned BitData = MI->getOperand(5).getReg(); unsigned ImmData = MI->getOperand(6).getImm(); unsigned AddrData = MI->getOperand(7).getReg(); unsigned PredData = MI->getOperand(8).getReg(); unsigned UndefData = MI->getOperand(9).getImm(); unsigned ExtraData = MI->getOperand(10).getImm(); MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MF.getSubtarget().getInstrInfo() ->get(Opc)); MIB.addImm(IntData); MIB.addReg(RegData); MIB.addReg(BaseData); MIB.addImm(IndexData); MIB.addImm(ShiftAmtData); MIB.addReg(BitData); MIB.addImm(ImmData); MIB.addReg(AddrData); MIB.addReg(PredData); MIB.addImm(UndefData); MIB.addImm(ExtraData); MBB->erase(MI); }" LLVM,RISCV,356,unsigned getInstSizeInBytes(const MachineInstr &MI) { unsigned Opcode = MI.getOpcode(); unsigned Size = 0; switch (Opcode) { case RISCV::ADD: case RISCV::SUB: case RISCV::MUL: Size = 4; break; default: Size = ~0U; break; } return Size; } LLVM,RISCV,357, LLVM,RISCV,358,"void RISCVTargetLowering::insertIndirectBranch(MachineBasicBlock *MBB, MachineBasicBlock *NewDestBB) const { MachineFunction *MF = MBB->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); DebugLoc DL; unsigned DestReg = MRI.createVirtualRegister(RISCV::GPRRegClass); BuildMI(*MBB, MBB->end(), DL, TII.get(RISCV::PseudoJump)).addReg(DestReg, RegState::Define).addMBB(NewDestBB, RISCVII::MO_CALL); MachineBasicBlock::iterator I = MBB->end(); do { I--; } while (I != MBB->begin() && I->getDesc().isTerminator() && !I->isPredicable()); if (!I->getDesc().isTerminator()) { MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(); MF->insert(++MBB->getIterator(), NewMBB); BuildMI(*NewMBB, NewMBB->begin(), DL, TII.get(RISCV::LUI)).addReg(DestReg).addMBB(NewDestBB, RISCVII::MO_CALL); } else { MachineInstr *MI = &*(I); DebugLoc DL = MI->getDebugLoc(); BuildMI(*MBB, ++I, DL, TII.get(RISCV::LUI)).addReg(DestReg).addMBB(NewDestBB, RISCVII::MO_CALL); }" LLVM,RISCV,359,"void insertOutlinedCall(Function *OutlinedFn, Instruction *InsertPt) { Function *CurFn = InsertPt->getParent()->getParent(); Module *M = CurFn->getParent(); const TargetRegisterClass *RC = &RISCV::GPRRegClass; const TargetRegisterInfo *TRI = 
Subtarget.getRegisterInfo(); Register Reg = TRI->getAllocatableClass(RC)[0]; BuildMI(*CurFn, InsertPt, DebugLoc(), TII->get(RISCV::PseudoCALLReg), Reg) .addExternalSymbol(OutlinedFn->getName()); MachineInstrBuilder MIB = BuildMI(*CurFn, InsertPt, DebugLoc(), TII->get(RISCVII::MO_CALL), RISCV::X0) .addReg(Reg) .addImm(0); MIB = MIB.setMemRefs(OutlinedFn->getMemRefs()); CurFn->insert(InsertPt->getIterator(), MIB); if (!M->getFunction(OutlinedFn->getName())) M->getOrInsertFunction(OutlinedFn->getName(), OutlinedFn->getFunctionType()); }" LLVM,RISCV,360,bool isAsCheapAsAMove(Instruction *I){ if(I->getOpcode() == RISCV::FSGNJ_D || I->getOpcode() == RISCV::FSGNJ_S || I->getOpcode() == RISCV::FSGNJ_H){ if(I->getNumOperands() == 3){ if(I->getOperand(0)->getType() == I->getOperand(1)->getType()){ if(I->getOperand(2)->getInstOpcode() == RISCV::ADDI || I->getOperand(2)->getInstOpcode() == RISCV::ORI || I->getOperand(2)->getInstOpcode() == RISCV::XORI){ if(I->getOperand(2)->getOperand(1)->getSExtValue() == 1 || I->getOperand(2)->getOperand(1)->getSExtValue() == 2){ if(I->getOperand(0)->getRegister() == I->getOperand(1)->getRegister()){ if(I->getOperand(1)->getRegister() == RISCV::X0){ return true; } } } } } } } return false; } LLVM,RISCV,361,"bool isCopyInstrImpl(MachineInstr *MI, const TargetRegisterInfo *TRI) { if (MI->getOpcode() == RISCV::ADDI) { const MachineOperand &Dst = MI->getOperand(0); const MachineOperand &Src = MI->getOperand(1); if (Dst.isReg() && Src.isReg() && TRI->getRegSizeInBits(Dst.getReg()) == TRI->getRegSizeInBits(Src.getReg())) { MI->clear(); MI->addOperand(MachineOperand::CreateReg(Dst.getReg(), false)); MI->addOperand(MachineOperand::CreateReg(Src.getReg(), false)); return true; } } else if (MI->getOpcode() == RISCV::FSGNJ_D) { const MachineOperand &Dst = MI->getOperand(0); const MachineOperand &Src = MI->getOperand(1); if (Dst.isReg() && Src.isReg() && TRI->getRegSizeInBits(Dst.getReg()) == TRI->getRegSizeInBits(Src.getReg())) { MI->clear(); MI->addOperand(MachineOperand::CreateReg(Dst.getReg(), false)); MI->addOperand(MachineOperand::CreateReg(Src.getReg(), false)); return true; } }" LLVM,RISCV,362,"switch (Inst.getOpcode()) { case RISCV::OPCODE_NAME: if (!verifyOperandType(Inst, RISCVOp::OPERAND_FIRST_RISCV_IMM)) { Report << ""Unexpected first operand type - expected a RISCV immediate\n""; isValid = false; } break; } Next, the function will check for specific RISCV operands that have explicit constraints on their values. For example, RISCVOp::OPERAND_UIMM2 should be in the range [0,2], RISCVOp::OPERAND_UIMM3 should be in the range [0,3], and so on. If any operand's value fails to meet these constraints, the function will set isValid to false and output an error message using the Report stream. For example: if (Inst.getOperand(0).getImm() < 0 || Inst.getOperand(0).getImm() > 2) { Report << ""Immediate operand must be between 0 and 2\n""; isValid = false; }" LLVM,RISCV,363,"enum OutliningType { NoOutlining, JSRCall, MoveRegonizing, VariableCall, DynamicCall }; ``` 3. Define the body of the function: The function will check the value of the target-specific values RISCV and RISCV::X5. If both are not equal to RISCV::X5, it will return NoOutlining, which indicates that outlining should not be performed. Otherwise, the value of RISCV::X5 will be checked and based on it, the appropriate OutliningType will be returned. 
The code for this function can be written as: ``` OutliningType RISCVTargetMachine::getOutliningType(const RISCVSubtarget *STI) { bool hasMIT = STI->getRegisterInfo() ->getSubRegIndexLaneMask(RISCV::X5); if (TargetVersoin == 1) { return OutliningType::NoOutlining; } if(TargetVersoin == 2) { if (hasMIT) return OutliningType::JSRCall; return OutliningType::MoveRegonizing; } if(TargetVersoin == 3) { if (hasMIT) return OutliningType::VariableCall; return OutliningType::DynamicCall; } }" LLVM,RISCV,364,bool shouldOutlineFromFunctionByDefault() { if (Triple.getArch() == Triple::riscv32 || Triple.getArch() == Triple::riscv64) { return true; } return false; } LLVM,RISCV,365, LLVM,RISCV,366,"loadRegFromStackSlot(unsigned Reg, MachineInstr *MI, MachineBasicBlock &MBB, MachineFunction &MF, const TargetRegisterClass *RC, int FI, DebugLoc DL) { if (RC == RISCV::GPRRegClass || RC == RISCV::FPR32RegClass || RC == RISCV::VRM2RegClass) { DataType = RISCV::LW; } else if (RC == RISCV::FLW) { DataType = RISCV::FLW; } else if (RC == RISCV::LD || RC == RISCV::FPR64RegClass || RC == RISCV::VRM4RegClass) { DataType = RISCV::LD; } else if (RC == RISCV::FLD) { DataType = RISCV::FLD; } else { llvm_unreachable(""Can't load this register from stack slot""); }" LLVM,RISCV,367," unsigned DstReg, DebugLoc DL = DebugLoc(), const MachineInstr::MIFlag ** MIFlags = nullptr) { assert(RC->contains(SrcReg) && ""Can't store this register to stack slot""); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Opcode)); MIB.addReg(DstReg); MIB.addFrameIndex(FrameIndex); if (Size == 32) { MIB.addMemOperand(MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore, 4, 4)); MIB.addMemOperand(MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore, 4, 4)); } " LLVM,RISCV,368, LLVM,RISCV,369,"void insertBranch(MachineBasicBlock *MBB, MachineBasicBlock *TargetBlock) { BuildMI(MBB, MBB->end(), DebugLoc(), TII->get(RISCV::PseudoCBR)) .addMBB(TargetBlock) .addImm(0) .addImm(0) .addImm(1) .addImm(2) .addImm(1) .addImm(2) .setMemRefs(nullptr); MachineInstr *BrInst = MBB->end(); BrInst->getOperand(0).setIsJTI(); BrInst->setDesc(TII->get(RISCV::PseudoBR)); BrInst->setFlags(RISCVABI::isCheriPureCapABI); }" LLVM,RISCV,370,"void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock* NewDestBB) { MachineFunction *MF = MBB.getParent(); MachineInstrBuilder MIB; bool RequiresRegScavenger = MF->getInfo()->requiresRegScavengerForLongBranch(); if (RequiresRegScavenger) { MachineBasicBlock *ExpandMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock()); MF->insert(MF->iterator_to(MBB), ExpandMBB); MBB.replaceInstrWithBranch(*MBB.instr_begin(), ExpandMBB); } MIB = BuildMI(MBB, MBB.getFirstTerminator(), MBB.findRegisterDefOperandIdx(RISCV::GP, false, true), RISCV::PseudoCJump); MIB.addMBB(NewDestBB); MIB.addReg(RISCV::GPCRRegClass); MIB.addImm(RISCV::PseudoJump); MIB.addImm(RISCVII::MO_CALL); MIB.addImm(0); MIB.addImm(8); MIB.addGlobalAddress(NewDestBB->getSymbol(), 0, RISCVII::MO_NONE); if (RISCVABI::isCheriPureCapABI(MF->getFunction().getCallingConv())) { MIB.addReg(RISCV::GPCRRegClass); } else { MIB.addReg(RISCV::GPRRegClass); } MBB.insert(MBB.getFirstTerminator(), MIB); }" LLVM,RISCV,371,"enum RISCVInstType { CMove = RISCV::CMove, CIncOffset = RISCV::CIncOffset, CIncOffsetImm = RISCV::CIncOffsetImm, FSGNJ_D = RISCV::FSGNJ_D, FSGNJ_S = RISCV::FSGNJ_S, ADDI = RISCV::ADDI, ORI = RISCV::ORI, XORI = RISCV::XORI }; Next, we will define a function called isAsCheapAsAMove that takes in an instruction and returns a 
boolean: bool isAsCheapAsAMove(const RISCVInstType Inst) { } Inside the function, we will use a switch statement to check for each type of instruction and return true if it exists in our list of target specific values: switch (Inst) { case RISCVInstType::CMove: case RISCVInstType::CIncOffset: case RISCVInstType::CIncOffsetImm: case RISCVInstType::FSGNJ_D: case RISCVInstType::FSGNJ_S: case RISCVInstType::ADDI: case RISCVInstType::ORI: case RISCVInstType::XORI: return true; default: return false; } This ensures that if the instruction matches one of the specified types, the function will return true. If it does not match, it will return false. Finally, we can add in the target specific values to make sure the function returns the correct value: bool isAsCheapAsAMove(const RISCVInstType Inst) { switch (Inst) { case RISCVInstType::CMove: case RISCVInstType::CIncOffset: case RISCVInstType::CIncOffsetImm: case RISCVInstType::FSGNJ_D: case RISCVInstType::FSGNJ_S: case RISCVInstType::ADDI: case RISCVInstType::ORI: case RISCVInstType::XORI: return true; default: return false; } }" LLVM,RISCV,372," bool isBranchOffsetInRange(const MachineInstr* instr, const MachineBasicBlock* MBB, const MCSubtargetInfo& STI) { First, we need to check if the instruction is a branch or jump instruction. If not, we can simply return false since there is no branch offset to check. if (!instr->isBranch() && !instr->isCall()) { return false; } Next, we need to get the encoding information for the instruction and the target specific values for the branch and jump instructions. const MCInstrDesc& MCID = instr->getDesc(); uint64_t OffsetMask = MCID.TSFlags & (RISCV::BranchMask | RISCV::JumpMask); uint64_t MatchMask = OffsetMask >> 16; uint64_t Opcode = (MCID.TSFlags & (RISCV::BasicBlockDependent | RISCV::JumpOrBranch)) >> RISCV::TSFlags; uint64_t TargetSpecificValue = MBB->getInfo().getTargetSpecificValue(); Now we need to get the offset for the branch instruction from the MCOperand and check if it is in range. To determine the range, we need to convert the offset in MachineOperand to an integer value and add it to the current address of the instruction. We also need to consider the branch offset's sign. if (instr->isBranch()) { int64_t BranchOffset = instr->getOperand(0).getImm(); int64_t CurrentPC = instr->getAddr() + 4; bool isNegative = (BranchOffset >> 11) & 1; int64_t Offset = (isNegative) ? 
-1 * (BranchOffset & 0x7FF) : BranchOffset & 0x7FF; int64_t BranchAddress = CurrentPC + Offset; if (BranchAddress > (CurrentPC + 2048) || BranchAddress < (CurrentPC - 2048)) { return false; } }" LLVM,RISCV,373, int &Offset) { switch (MI->getOpcode()) { default: return 0; case RISCV::LB: case RISCV::LBU: case RISCV::LH: case RISCV::LHU: case RISCV::FLH: case RISCV::LW: case RISCV::FLW: case RISCV::LWU: case RISCV::LD: case RISCV::FLD: case RISCV::LC_64: case RISCV::LC_128: case RISCV::CLB: case RISCV::CLBU: case RISCV::CLH: case RISCV::CLHU: case RISCV::CLW: case RISCV::CFLW: case RISCV::CLWU: case RISCV::CLD: case RISCV::CFLD: case RISCV::CLC_64: case RISCV::CLC_128: if (MI->mayLoad()) { if (MI->getNumOperands() == 3 && MI->getOperand(0).isReg() && MI->getOperand(1).isFI() && MI->getOperand(2).isImm()) { NumBytes = MI->getOperand(2).getImm(); Offset = MI->getOperand(1).getIndex(); return MI->getOperand(0).getReg(); } } return 0; } } LLVM,RISCV,374, LLVM,RISCV,375,uint64_t RISCVInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const { switch (MI->getOpcode()) { case RISCV::PseudoCALLReg: case RISCV::PseudoCALL: case RISCV::PseudoJump: case RISCV::PseudoTAIL: case RISCV::PseudoLLA: case RISCV::PseudoLA: case RISCV::PseudoLA_TLS_IE: case RISCV::PseudoLA_TLS_GD: return 8; default: return ~0U; } } LLVM,RISCV,376, LLVM,RISCV,377,"bool isBranchOffsetInRange(unsigned Opcode, int64_t Offset) { switch (Opcode) { default: llvm_unreachable(""Unexpected opcode!""); case RISCV::BEQ: case RISCV::BNE: case RISCV::BLT: case RISCV::BGE: case RISCV::BLTU: case RISCV::BGEU: case RISCV::CV_BEQIMM: case RISCV::CV_BNEIMM: return Offset >= -2048 && Offset < 2048; case RISCV::JAL: case RISCV::PseudoBR: case RISCV::PseudoJump: return Offset >= -524288 && Offset < 524288; case RISCV::HWPLBranch: return Offset >= -1024 && Offset < 1024; } }" LLVM,RISCV,378,"bool reverseBranchCondition(ConditionList &cond) { if (cond.size() != 3) { return true; } CondCode opCode = (CondCode)cond[0]; uint64_t lhs = cond[1]; uint64_t rhs = cond[2]; switch (opCode) { case RISCV::HwlpBranch: cond[1] = rhs; cond[2] = lhs; return false; default: report_fatal_error(""Invalid branch condition!""); return true; } }" LLVM,RISCV,379, LLVM,RISCV,380,"Function *buildOutlinedFrame(Function &F, Module &M, LLVMContext &Context) { FunctionType *FuncType = F.getFunctionType(); std::string OutlinedName = F.getName().str() + ""_outlined""; Function *OutlinedFunc = Function::Create(FuncType, Function::ExternalLinkage, OutlinedName, &M); TargetSpecificValue RISCV_JALR = 0; TargetSpecificValue RISCV_X0 = 0; TargetSpecificValue RISCV_X5 = 0; BasicBlock *EntryBB = BasicBlock::Create(Context, ""entry"", OutlinedFunc); Argument *FuncArgs = OutlinedFunc->arg_begin(); IRBuilder<> Builder(EntryBB); std::map ValueMap; for (auto &Arg : F.args()) { AllocaInst *Alloca = Builder.CreateAlloca(Arg.getType()); ValueMap[&Arg] = Alloca; Builder.CreateStore(&Arg, Alloca); FuncArgs++; }" LLVM,RISCV,381,"define i64 copyPhysReg(i64 sourceReg, i64 destReg, regClass regClass, i16 CSI, i16 TRI) { if (sourceReg == destReg) { return 0; } if (regClass == RISCV::GPRRegClass) { ADDI(destReg, sourceReg, 0); } else if (regClass == RISCV::FPR32RegClass) { FSGNJ_S(destReg, sourceReg); }" LLVM,RISCV,382,unsigned int getInstSizeInBytes(const MachineInstr *MI) { int Size = 0; const MCInstrDesc &Desc = MI->getDesc(); if (Desc.isCall()) { const MachineOperand &Op = MI->getOperand(0); if (Op.isReg()) Size += 4; } if (Desc.isCall() || Desc.isTailCall()) Size += 4; if (Desc.isLoad() || 
Desc.isMoveImmediate()) { const MachineOperand &Base = MI->getOperand(0); if (Base.isReg()) Size += 4; if (Base.isImm()) Size += 4; } return Size; } LLVM,RISCV,383,"#define RISCV_LOAD_OPCODES {RISCV::LB,RISCV::LBU,RISCV::LH,RISCV::LHU,RISCV::LW,RISCV::FLW,RISCV::LWU,RISCV::LD,RISCV::FLD,RISCV::VLE_V_um,RISCV::VLE_V_um} std::pair isLoadFromStackSlot(MachineInstr *MI, const int *TargetSpecificValues) { unsigned RegNum = 0; int FrameIndex = 0; for(int i = 0; TargetSpecificValues[i] != 0; i++) { if(MI->getOpcode() == TargetSpecificValues[i]) { MachineOperand Src = MI->getOperand(1); if(Src.isFI()) { FrameIndex = Src.getIndex(); MachineOperand Dest = MI->getOperand(0); if(Dest.isReg()) { RegNum = Dest.getReg(); } } break; } } return std::make_pair(RegNum, FrameIndex); }" LLVM,RISCV,384," * the instruction is not a store to stack slot, returns {-1,0}. */ std::pair isStoreToStackSlot(const MachineInstr *MI, const MachineRegisterInfo &MRI) { if (!MI->isStore()) return {-1,0}; const MachineOperand &MO = MI->getOperand(0); if (!MO.isFI()) return {-1,0}; unsigned int FrameIdx = MO.getIndex(); const MachineOperand &SrcMO = MI->getOperand(1); if (!SrcMO.isReg()) return {-1,0}; unsigned int RegNum = SrcMO.getReg(); if (TargetRegisterInfo::isVirtualRegister(RegNum)) { const MachineRegisterInfo::VirtRegInfo *VirtReg = MRI.getVRegInfo(RegNum); RegNum = VirtReg->PhysReg; } return {RegNum, FrameIdx}; }" LLVM,RISCV,385," const TargetRegisterInfo *TRI) { MachineFrameInfo &MFI = MF.getFrameInfo(); int Offset = MFI.getObjectOffset(FrameIndex); int FrameSize = MFI.getStackSize(); if (Offset < 0 || Offset >= FrameSize) { report_fatal_error(""Can't load this register from stack slot""); } MachineBasicBlock &MBB = MF.front(); MachineBasicBlock::iterator MBBI = MBB.begin(); BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), DestReg) .addFrameIndex(FrameIndex) .addImm(Offset) .addReg(RISCV::SP); if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64)) { BuildMI(MBB, MBBI, DL, TII->get(RISCV::LD), DestReg) .addFrameIndex(FrameIndex) .addImm(Offset) .addReg(RISCV::SP); } if (RC->isVector()) { BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), DestReg) .addFrameIndex(FrameIndex) .addImm(Offset) .addReg(RISCV::SP); } }" LLVM,RISCV,386,"void storeRegToStackSlot(reg, regClass, stackIndex, MBB, DL, MF) { if (MF.getSubtarget().isRISCV64()) { if (regClass == RISCV::GPRRegClass) { MBB.addInstr(RISCV::SD, DL, MBB.getMoveImmediate(DL, stackIndex), reg); } else if (regClass == RISCV::FPR32RegClass) { MBB.addInstr(RISCV::FSW, DL, MBB.getMoveImmediate(DL, stackIndex), reg); } else if (regClass == RISCV::FPR64RegClass) { MBB.addInstr(RISCV::FSD, DL, MBB.getMoveImmediate(DL, stackIndex), reg); } else if (regClass == RISCV::VGRRegClass) { MBB.addInstr(RISCV::VSE_V_um, DL, MBB.getMoveImmediate(DL, stackIndex), reg); } else if (regClass == RISCV::VPRRegClass) { MBB.addInstr(RISCV::VSE_V_um, DL, MBB.getMoveImmediate(DL, stackIndex), reg); } else if (regClass == RISCV::VQRRegClass) { MBB.addInstr(RISCV::VSE_V_um, DL, MBB.getMoveImmediate(DL, stackIndex), reg); } else if (regClass == RISCV::VORRegClass) { MBB.addInstr(RISCV::VSE_V_um, DL, MBB.getMoveImmediate(DL, stackIndex), reg); }" LLVM,RISCV,387, LLVM,RISCV,388," const TargetRegisterInfo *TRI) const { assert(RC == RISCV::GPRRegClass && ""Can only store to RISCV GPR register class""); MachineFrameInfo &MFI = MF.getFrameInfo(); MachineBasicBlock &MBB = MF.front(); MachineRegisterInfo &MRI = MF.getRegInfo(); unsigned SP = RISCV::X2; unsigned StoreOpcode = RISCV::SW; bool isPhysReg = 
TargetRegisterInfo::isPhysicalRegister(SrcReg); const TargetRegisterClass *SrcRC = isPhysReg ? nullptr : MRI.getRegClass(SrcReg); DebugLoc DL; if (!isPhysReg) { SrcReg = MRI.createVirtualRegister(SrcRC); } MachineInstrBuilder MIB = BuildMI(MBB, MBB.getFirstNonPHI(), DL, get(StoreOpcode)); MIB.addReg(SP, RegState::Define); MIB.addImm(FI); if (isKill) { MIB.addReg(SrcReg, getKillRegState(true)); } else { MIB.addReg(SrcReg, RegState::Kill); } if (!isPhysReg) { MFI.setVirtualRegisterUsedInPrologue(SrcReg); } O << ""Can't store this register to stack slot""; }" LLVM,RISCV,389,unsigned getInstSizeInBytes(const MachineInstr *MI) { const unsigned Opcode = MI->getOpcode(); if (Opcode == RISCV::PseudoCALL || Opcode == RISCV::PseudoTAIL) { return 4; } if (Opcode == RISCV::PseudoLLA) { return 8; } return ~0U; } LLVM,RISCV,390,"void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, bool RegScavenger, bool newBlock, unsigned size, int &ErrorCode, unsigned BitSize, unsigned GPRRegClass, unsigned LUIOpcode, unsigned LOOpcode, unsigned PseudoBRINDOpcode, unsigned GPRReg, int ImmOffset) { if (ImmOffset > INT32_MAX || ImmOffset < INT32_MIN) { ErrorCode = RISCV::TARGET_ERROR; return; } if (RegScavenger) { unsigned Reg = MBB.getParent()->getRegScavenger().FindUnusedReg(&RI); if (!Reg) { ErrorCode = RISCV::REG_SCAVENGE_ERROR; return; }" LLVM,RISCV,391, LLVM,RISCV,392, LLVM,RISCV,393,"namespace { class RISCVFunctionInfo final : public MachineFunctionInfo { const MachineMemOperand *SpillAutoIncBytes; public: RISCVFunctionInfo(MachineFunction &MF) : SpillAutoIncBytes(nullptr) {} virtual ~RISCVFunctionInfo() {} }; class RISCVFrameLowering final : public TargetFrameLowering { const RISCVSubtarget &STI; public: RISCVFrameLowering(const RISCVSubtarget &STI) : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0), STI(STI) {} void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override { } void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override { } bool isFPCloseToIncomingSP() const override { return false; } }; } void RISCVVectorRegisterClass::addRegisterOperands(MachineInstrBuilder &MIB, unsigned OpFlags) const { for (auto MO : Registers) { MIB.addReg(MO.getReg(), getFlags() | OpFlags); } }" LLVM,RISCV,394,"void storeRegToStackSlot(RISCV::Register Reg, int StackFrameIndex) { RISCV::MachineFunction &MF = getFunction(); const RISCV::TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); DebugLoc DL = MF.front().getDebugLoc(); MachineRegisterInfo &MRI = MF.getRegInfo(); RISCV::Register RegClass; switch (Reg->getRegClass()) { case RISCV::GPRRegClass: RegClass = RISCV::GPRRegClass; break; case RISCV::FPR32RegClass: RegClass = RISCV::FPR32RegClass; break; default: report_fatal_error(""Can't store this register to stack slot""); } MachineInstrBuilder MI = BuildMI(MF, DL, TII.get(RISC::SW)); MI.addReg(Reg).addFrameIndex(StackFrameIndex).addImm(0); MRI.replaceRegWith(StackFrameIndex, Reg); MRI.clearKillFlags(StackFrameIndex); MRI.setRegAllocationHint(StackFrameIndex, RegClass); MachineBasicBlock &MBB = MF.front(); MBB.insert(MBB.begin(), MI); MF.RenumberBlocks(); for (MachineBasicBlock &MBB : MF) { MBB.sort(Comp); for (MachineInstr &MI : MBB) MI.setOrder(); } }" LLVM,RISCV,395, LLVM,RISCV,396,"void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, unsigned FrameIdx) { const TargetInstrInfo *TII = MBB.getParent()->getSubtarget().getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); unsigned VReg = 
MRI->createVirtualRegister(&RISCV::GPRRegClass); BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI), VReg).addReg(SrcReg); const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); unsigned StoreOpc = 0; if (RC == &RISCV::GPRRegClass) { StoreOpc = RISCV::SW; } else if (RC == &RISCV::GPRV64I8RegClass) { StoreOpc = RISCV::SDVI8; } else if (RC == &RISCV::GPRV64I16RegClass) { StoreOpc = RISCV::SDVI16; } else if (RC == &RISCV::GPRV64I32RegClass) { StoreOpc = RISCV::SDVI32; } else if (RC == &RISCV::FPR32RegClass) { StoreOpc = RISCV::FSW; } else if (RC == &RISCV::FPR64RegClass) { StoreOpc = RISCV::FSD; } else { llvm_unreachable(""Can't store this register to stack slot""); return; } BuildMI(MBB, MI, DL, TII->get(StoreOpc)).addReg(VReg).addFrameIndex(FrameIdx); MI->eraseFromParent(); }" LLVM,RISCV,397,unsigned int getInstSizeInBytes(const MachineInstr *MI) { unsigned int Size = 0; switch (MI->getOpcode()) { case RISCV::PseudoCALL: case RISCV::PseudoTAIL: case RISCV::PseudoLLA: case RISCV::PseudoLA: Size = 8; break; default: Size = ~0U; break; } return Size; } LLVM,RISCV,398,"unsigned int getInstSizeInBytes(const MachineInstr &MI) { const MCInstrDesc &MCID = MI.getDesc(); switch (MI.getOpcode()) { case RISCV::PseudoCALL: case RISCV::PseudoTAIL: return 8; default: if (MCID.isTargetSpecific()) { const RISCVInstrInfo* TII = static_cast(MI.getMF().getSubtarget().getInstrInfo()); if (TII) { MachineOperand MO = MI.getOperand(3); if (MO.getSubReg()) return TII->getInstSizeInBytes(MCID, MO.getSubReg()); } } return ~0U; } }" LLVM,RISCV,399,"bool isCopyInstrImpl(MachineInstr *MI, const MCInstrDesc &MCID, unsigned ByteSize) { unsigned NumOperands = MI->getNumExplicitOperands(); if (NumOperands != 2) { return false; } MachineOperand Dest = MI->getOperand(0); MachineOperand Source = MI->getOperand(1); if (!Dest.isReg() || !Source.isReg()) { return false; } if (!TargetRegisterInfo::isPhysicalRegister(Dest.getReg()) || !TargetRegisterInfo::isPhysicalRegister(Source.getReg())) { return false; } if (MI->getOpcode() != RISCV::ADDI) { return false; } if (ByteSize != 4) { return false; } unsigned DestRegNum = RegisterInfo::virtReg2Index(Dest.getReg()); unsigned SourceRegNum = RegisterInfo::virtReg2Index(Source.getReg()); if (DestRegNum + 1 != SourceRegNum) { return false; } if (!RISCV::FSGNJ_D.contains(Dest.getReg()) && !RISCV::FSGNJ_S.contains(Source.getReg())) { return false; } return true; }" LLVM,RISCV,400,"bool analyzeBranch(MachineBasicBlock &MBB) { const auto &Instructions = MBB.instrs(); for (const auto &I : Instructions) { if (I.getOperand(RISCV::CCMASK_ANY) != nullptr) { return true; } } return false; } ``` This function can be called by the RISCV backend whenever a MachineFunction is being processed. 
For example, in FunctionRISCV.cpp, we can add a call to the analyzeBranch function before performing any other actions on the MBB: ``` void FunctionRISCV::emitFunction(MachineFunction &MF) { bool IsBranchCodeClear = analyzeBranch(MF.front()); }" LLVM,RISCV,401,"void expandPostRAPseudo(MachineInstr *MI) { MachineFunction *MF = MI->getParent()->getParent(); MachineBasicBlock &MBB = *MI->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); uint16_t Opcode = MI->getOpcode(); switch (Opcode) { default: { llvm_unreachable(""Unhandled pseudo instruction""); break; } case RISCV::PseudoInstruction1: { Register DestReg = MI->getOperand(0).getReg(); Register SrcReg1 = MI->getOperand(1).getReg(); Register SrcReg2 = MI->getOperand(2).getReg(); DebugLoc DL = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII->get(Opcode)).addReg(DestReg).addReg(SrcReg1).addReg(SrcReg2); MIB.addImm(RISCV::RISCV); MRI.replaceRegWith(DestReg, RISCV::RISCV); MRI.replaceRegWith(SrcReg1, RISCV::RISCV); MRI.replaceRegWith(SrcReg2, RISCV::RISCV); MI->eraseFromParent(); break; } } }" LLVM,RISCV,402, int &FrameIndex) { const MachineOperand &DestOp = MI->getOperand(0); if (DestOp.isReg()) { DestReg = MR->getInstrInfo()->getFrameRegister(*MF); const MachineOperand &SrcOp = MI->getOperand(1); if (SrcOp.isFI()) { FrameIndex = SrcOp.getFrameIndex(); return true; } } return false; } LLVM,RISCV,403,"unsigned isStoreToStackSlot(const MachineInstr *MI, int &VirtOrPhysReg, int &FrameIndex) { if (MI->getOpcode() != RISCV::SIMPLESTORE) return 0; VirtOrPhysReg = MI->getOperand(0).getReg(); FrameIndex = MI->getOperand(1).getIndex(); return 1; }" LLVM,RISCV,404,"bool isBranchOffsetInRange(unsigned Opcode, int64_t Offset) { switch (Opcode) { case RISCV::BEQ: case RISCV::BNE: case RISCV::BLT: case RISCV::BGE: case RISCV::BLTU: case RISCV::BGEU: case RISCV::P_BEQIMM: case RISCV::P_BNEIMM: case RISCV::PseudoBR: case RISCV::LoopBranch: return (Offset >= -4095 && Offset <= 4094); case RISCV::JAL: return (Offset >= -1048575 && Offset <= 1048574); default: llvm_unreachable(""Unexpected opcode!""); } }" LLVM,RISCV,405,bool reverseBranchCondition(std::vector<MachineOperand> &CondList) { if (CondList.size() != 3) { return true; } if ((!CondList[0].isReg() && !CondList[0].isImm()) || !CondList[1].isImm() || !CondList[2].isImm()) { return true; } if (CondList[0].isImm() && CondList[0].getImm() != RISCV) { return true; } if (CondList[1].getImm() != RISCV::LoopBranch) { return true; } if (CondList[2].getImm() != 0) { return true; } if (CondList[0].isReg()) { CondList[0].setImm(-1); } else { CondList[0].setImm(0); } return false; } LLVM,RISCV,406, LLVM,RISCV,407, LLVM,RISCV,408,unsigned int getInstSizeInBytes(const MachineInstr *MI) { unsigned int Size = 0; unsigned int Opcode = MI->getOpcode(); switch (Opcode) { case RISCV::ADDI: case RISCV::ANDI: case RISCV::AUIPC: case RISCV::BEQ: case RISCV::BNE: case RISCV::JAL: case RISCV::JALR: case RISCV::LB: case RISCV::LBU: case RISCV::LH: case RISCV::LHU: case RISCV::LUI: case RISCV::ORI: case RISCV::SB: case RISCV::SH: case RISCV::SLLI: case RISCV::SLTI: case RISCV::SLTIU: case RISCV::SRAI: case RISCV::SRLI: case RISCV::XORI: Size = 4; break; case RISCV::PseudoCALL: case RISCV::PseudoTAIL: Size = 8; break; case RISCV::PseudoLLA: case RISCV::PseudoLA: case RISCV::PseudoLA_TLS_IE: case RISCV::PseudoLA_TLS_GD: Size = 8 + MI->getOperand(2).getImm() * 4; break; default: Size = 
~0U; } return Size; } LLVM,RISCV,409, LLVM,RISCV,410, const char* FailMsg) { if (RegClass.contains(SrcReg) && RegClass.contains(DstReg)) { return; } LLVM,RISCV,411,unsigned int getInstSizeInBytes(const MachineInstr &MI){ switch(MI.getOpcode()){ case RISCV::PseudoCALL: case RISCV::PseudoTAIL: return 8; default: return ~0U; } } LLVM,RISCV,412, LLVM,RISCV,413, LLVM,RISCV,414,"bool isBranchOffsetInRange(int opcode, int offset) { if (opcode == RISCV::BEQ || opcode == RISCV::BNE || opcode == RISCV::BLT || opcode == RISCV::BGE || opcode == RISCV::BLTU || opcode == RISCV::BGEU || opcode == RISCV::JAL || opcode == RISCV::PseudoBR) { if (offset >= -2048 && offset <= 2047) { return true; } else { return false; } } else { return false; } }" LLVM,RISCV,415," int &DestReg, int &FrameIndex) const { if (MI->getOpcode() == RISCV::LB || MI->getOpcode() == RISCV::LBU || MI->getOpcode() == RISCV::LH || MI->getOpcode() == RISCV::LHU || MI->getOpcode() == RISCV::LW || MI->getOpcode() == RISCV::FLW || MI->getOpcode() == RISCV::LWU || MI->getOpcode() == RISCV::LD || MI->getOpcode() == RISCV::FLD) { DestReg = MI->getOperand(0).getReg(); FrameIndex = MI->getOperand(1).getIndex(); return true; } return false; }" LLVM,RISCV,416, LLVM,RISCV,417,"RISCV::GPRRegClass *RC, int FI, const TargetInstrInfo &TII, DebugLoc DL) { MachineFunction &MF = *(MBB.getParent()); MachineFrameInfo &MFI = MF.getFrameInfo(); const RISCVRegisterInfo &TRI = *static_cast(MF.getSubtarget().getRegisterInfo()); assert(FI < MFI.getObjectIndexEnd() && ""Cannot access stack slot beyond the stack size""); int Offset = MFI.getObjectOffset(FI); int RegSize = TRI.getRegSizeInBytes(RC->getID()); MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, TII.get(RISCV::LW)) .addReg(DestReg, RegState::Define) .addFrameIndex(FI) .addImm(0); if (RegSize == 8) { MI.setDesc(TII.get(RISCV::LD)); } if (RC == &RISCV::FPR32RegClass) { MI.setDesc(TII.get(RISCV::FLW)); } else if (RC == &RISCV::FPR64RegClass) { MI.setDesc(TII.get(RISCV::FLD)); } MI.setMemRefs(MF, Offset, MachineMemOperand::MONone); DEBUG(dbgs() << ""Loading register "" << TRI.getName(DestReg) << "" from frame index "" << FI << ""\n""); }" LLVM,RISCV,418,"void removeBranch(MachineBasicBlock *MBB) { MachineInstr *LastInstr = MBB->getFirstTerminator(); if(!LastInstr) return; if(!LastInstr->isBranch()) return; if(!LastInstr->isIdenticalTo(MachineInstr::Create( TargetOpcode::COPY, DebugLoc() , false, false), TargetOpcode::COPY, DebugLoc() , false, false), 0, 0, 0, MachineInstr::Create( TargetOpcode::ORI, DebugLoc() , false, false), MachineInstr::Create( TargetOpcode::SLTI, DebugLoc() , false, false), MachineInstr::Create( TargetOpcode::SLTI, DebugLoc() , false, false))) return; MBB->erase(LastInstr); }" LLVM,RISCV,419,"bool reverseBranchCondition(CONST_STRING_LIST ConditionList) { if (ConditionList.size() != 3) { return true; } std::string CondCode = ConditionList[0]; int CondFlag = std::stoi(ConditionList[1]); if (CondCode != ""RISCV"" || CondFlag != 3) { return true; } if (CondCode == ""BEQ"" || CondCode == ""BNE"" || CondCode == ""BLT"" || CondCode == ""BGE"" || CondCode == ""BLTU"" || CondCode == ""BGEU"") { if (CondCode == ""BEQ"") { CondCode = ""BNE""; } else if (CondCode == ""BNE"") { CondCode = ""BEQ""; } else if (CondCode == ""BLT"") { CondCode = ""BGE""; } else if (CondCode == ""BGE"") { CondCode = ""BLT""; } else if (CondCode == ""BLTU"") { CondCode = ""BGEU""; } else if (CondCode == ""BGEU"") { CondCode = ""BLTU""; } ConditionList.clear(); ConditionList.push_back(CondCode); 
ConditionList.push_back(std::to_string(CondFlag)); ConditionList.push_back(""Invalid branch condition!""); return false; } return true; }" LLVM,RISCV,420, LLVM,RISCV,421,"std::string getName(const Function &F) { LLVMContext &Context = F.getContext(); Constant *name = ConstantDataArray::getString(Context, ""RISCV"", true); GlobalVariable *gv = new GlobalVariable( *F.getParent(), name->getType(), true, GlobalValue::PrivateLinkage, name, ""plan_name"" ); Constant *zero = ConstantInt::get(Type::getInt32Ty(Context), 0); Value *indices[] = {zero, zero}; Constant *name_address = ConstantExpr::getGetElementPtr(gv, indices); LoadInst *load_name = new LoadInst( name_address, ""plan_name"" ); std::string result = """"; result += load_name->getName().str(); unsigned VF = F.getAttributes().getVectorizationFactor(); unsigned UF = F.getAttributes().getUnrollFactor(); result += "" with VF="" + std::to_string(VF) + "" and UF="" + std::to_string(UF); return result; }" LLVM,RISCV,422,void select(Instruction I) { if (I.getTargetSpecificValue() == RISCV) { } else if (I.getTargetSpecificValue() == RISCV) { } else { } } LLVM,RISCV,423,"void dump() { std::cerr << ""RISCV\n""; std::cerr << ""RISCVAddressingMode = "" << RISCVAddressingMode << ""\n""; std::cerr << ""Base = "" << Base << ""\n""; std::cerr << ""0 = null\n""; std::cerr << ""Offset = "" << Offset << ""\n""; }" LLVM,RISCV,424,"StringRef getPassName(const StringRef &TargetSpecificValue) { if (TargetSpecificValue == ""RISCV DAG->DAG Pattern Instruction Selection"") { return StringRef(""RISCV Instruction Selection""); } return StringRef(""unknown""); }" LLVM,RISCV,425,"bool isValidOffset(RISCVAddressingMode Mode, int Offset) { switch (Mode) { case RISCVAddressingMode::OffRange: if (Offset < -2048 || Offset > 2047) { return false; } break; case RISCVAddressingMode::Off12Only: if (Offset < -4096 || Offset > 4095) { return false; } break; default: return true; } return true; } This function takes in two parameters: the addressing mode and the offset value. It then checks the addressing mode and based on its value, it checks if the offset is within the specified range. If the offset is not within the range, it returns false to indicate that the offset is not valid. If the addressing mode is not OffRange or Off12Only, we can either handle it in this function or return true if we don't have any other target specific values to check for. 
Example usage: if (isValidOffset(Mode, Offset)) { } else { }" LLVM,RISCV,426, LLVM,RISCV,427, LLVM,RISCV,428,"std::pair SelectInlineAsmMemoryOperand(const SDValue & N, char Constraint, SelectionDAG & CurDAG) { if(Constraint != 'RISCV' && Constraint != 'riscv') { return std::make_pair(SDValue(), SDValue()); }" LLVM,RISCV,429,"void Select(const RISCVTargetLowering *TLI, SDNode *Node, std::vector &OutOps) { unsigned Opcode = Node->getOpcode(); if (Opcode == ISD::Constant) { uint64_t Val = cast(Node)->getZExtValue(); SDValue ConstNode = DAG.getTargetConstant(Val, SDLoc(Node), MVT::i32); OutOps.push_back(ConstNode); return; } if (Opcode == ISD::FrameIndex) { int FrameIdx = cast(Node)->getIndex(); SDValue FrameIdxNode = DAG.getTargetFrameIndex(FrameIdx, MVT::i32); OutOps.push_back(FrameIdxNode); return; } if (Opcode == ISD::ADD) { SDValue Op0 = Node->getOperand(0); SDValue Op1 = Node->getOperand(1); if (Op0.getOpcode() == RISCVISD::CONSTANT && Op1.getOpcode() == RISCVISD::FRAME_INDEX) { SDValue Addi = DAG.getNode(RISCVISD::ADDI, SDLoc(Node), MVT::i32, Op0, Op1); OutOps.push_back(Addi); return; } } SelectionDAG::SelectNodeTo(Node, RISCV::X0); }" LLVM,RISCV,430,"SDValue RISCVTargetLowering::SelectInlineAsmMemoryOperand(SDValue Op, unsigned ConstraintCode, const SDLoc &DL, SelectionDAG &DAG) { if (ConstantSDNode *C = dyn_cast(Op)) return CurDAG->getTargetConstant(*C->getConstantIntValue(), DL, Op.getValueType()); switch(ConstraintCode) { case RISCV: { return CurDAG->getTargetGlobalAddress(cast(Op)->getGlobal(), DL, Op.getValueType()); } case RISCV: { return CurDAG->getTargetJumpTable(cast(Op)->getIndex(), Op.getValueType()); }" LLVM,RISCV,431, LLVM,RISCV,432,void RISCVTargetLowering::PostprocessISelDAG(MachineInstr *MI) { if (MI->getOpcode() == RISCV::ADD) { MI->setDesc(get(RISCV::OR)); } } LLVM,RISCV,433, LLVM,RISCV,434,"switch (N->getOpcode()) { case RISCV::X0: if (N->getOperand(0).getOpcode() == ISD::FrameIndex) { SDValue Imm = CurDAG->getTargetConstant(N->getOperand(0).getOperand(1)->getIndex(), 0, MVT::i64); return CurDAG->getMachineNode(RISCV::ADDI, SDLoc(N), N->getValueType(0), N->getOperand(0).getOperand(0), Imm); } llvm_unreachable(""Unsupported operation!""); case ISD::SRL: SDValue Imm = CurDAG->getTargetConstant(1, SDLoc(N), MVT::i64); return CurDAG->getMachineNode(RISCV::SRLIW, SDLoc(N), N->getValueType(0), N->getOperand(0), Imm); case RISCVISD::READ_CYCLE_WIDE: return CurDAG->getMachineNode(RISCV::ReadCycleWide, SDLoc(N), N->getVTList(), N->getOperand(0), N->getOperand(1)); default: llvm_unreachable(""Unsupported operation!""); }" LLVM,RISCV,435, LLVM,RISCV,436,"void PostprocessISelDAG(SelectionDAG &DAG) { for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(), E = DAG.allnodes_end(); I != E; I++) { if (I->getOpcode() == ISD::ADD) { SDValue NewNode = DAG.getNode(RISCVISD::ADD, SDLoc(I), MVT::i32, I->getOperand(0), I->getOperand(1)); DAG.ReplaceAllUsesOfValueWith(SDValue(I, 0), NewNode); } } }" LLVM,RISCV,437,"void PreprocessISelDAGSelectionDAG(TargetLowering::DAGCombinerInfo &DCI, SDNode *N) { SelectionDAG &DAG = DCI.DAG; MachineFunction &MF = DAG.getMachineFunction(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); const RISCVSubtarget &Subtarget = MF.getSubtarget(); unsigned Opcode = N->getOpcode(); if (Opcode == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) { SDValue Ops[3]; Ops[0] = N->getOperand(0); Ops[1] = N->getOperand(1); Ops[2] = N->getOperand(2); SDValue NewNode = DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, SDLoc(N), MVT::i64, Ops); DAG.ReplaceAllUsesWith(N, 
NewNode); dbgs() << ""RISCV PreprocessISelDAG - Replaced node with target specific value\n""; return; }" LLVM,RISCV,438,"SDValue RISCVTargetLowering::Select(SDValue Node, LLVMContext& Context) { if (Node.getValueType() == RISCV::X0) { return Node; } if (Node.isConstant() && Node.getValueType().isInteger()) { return CurDAG->getTargetConstant(RISCV::X0, Node.getValueType()); } return TargetLowering::Select(Node, Context); }" LLVM,RISCV,439," LLVM_ATTRIBUTE_USED static SDNode *Select(const SDNode *N) { unsigned Opc = N->getOpcode(); EVT VT = N->getValueType(0); SelectionDAG &DAG = N->getDAG(); const RISCVSubtarget *ST = &getSubtarget(RISCV_ADJ); if (isSDNodeSourceOfSingleReuseableNarrowMask(N)) return SelectVUreinterpretWithSelect(N, DAG); switch(N->getOpcode()) { default: break; case ISD::Constant: { if (VT.isInteger()) { int64_t Val = cast(N)->getSExtValue(); EVT MemVT = N->getOperation().getValueType(); SDNode *ChildNode = NULL; if (Val == 0) ChildNode = DAG.getTargetRegisterInfo().getFrameRegister(*DAG.getContext()); else { int ValLo = Val & 0xFFFFFFFF; int ValHi = Val >> 32; SDValue Hi = SDValue(DAG.getConstant(ValHi, MemVT), 0); SDValue Lo = SDValue(DAG.getConstant(ValLo, MemVT), 0); if (ValHi == 0) { ChildNode = DAG.getNode(ISD::ADD, VT, Hi, Lo); } else if (ValLo == 0) { if (ValHi == 1) ChildNode = Hi; else ChildNode = DAG.getNode(ISD::SHL, VT, Hi, DAG.getConstant(ValHi, VT)); } else { ChildNode = DAG.getNode(ISD::SHL, VT, Lo, DAG.getConstant(ValLo, VT)); ChildNode = DAG.getNode(ISD::ADD, VT, args, DAG.getPointer(i)); } } assert(ChildNode != NULL && ""Select node was not created!""); return ChildNode; } break; }" LLVM,RISCV,440,"void RISCVTargetLowering::PostprocessISelDAG(TargetMachine &TM, SelectionDAG &DAG) { const RISCVSubtarget &ST = TM.getSubtarget(); SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(); while (NI != DAG.allnodes_end()) { if (NI->getOpcode() == ISD::LOAD) { SDValue Ptr = NI->getOperand(1); EVT VT = NI->getValueType(0); SDNode *LoadOpt = RISCVTargetLowering::CustomFoldLoad(Ptr, VT, DAG); if (LoadOpt) { DAG.ReplaceAllUsesOfValueWith(SDValue(NI, 0), SDValue(LoadOpt, 0)); DAG.RemoveDeadNode(NI); } } ++NI; } DAG.finalizeLowering(); }" LLVM,RISCV,441,void RISCVTargetLowering::Select(SDNode *N) const { switch (N->getOpcode()) { case ISD::Constant: { break; } default: { SelectCode(N); break; } } } LLVM,RISCV,442,"function PreprocessISelDAG(M) { if (M.getTarget().getTriple().isArch(""riscv"")) { for (SI in M) { if (SI->getOpcode() == RISCV::SPLAT_VECTOR_SPLIT_I64_VL) { Ops = SI->operands(); if (Ops.getNumOperands() != 3) { errs() << ""Unexpected number of operands in SPLAT_VECTOR_SPLIT_I64_VL instruction!""; return; } VT1 = Ops[0]->getValueType(); VT2 = Ops[1]->getValueType(); VT3 = Ops[2]->getValueType(); if (VT1 != MVT::i64 || VT2 != MVT::i32 || VT3 != MVT::i32) { errs() << ""Unexpected VTs in SPLAT_VECTOR_SPLIT_I64_VL instruction!""; return; } NewSI = M->CreateNode(RISCV, RISCV, RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, 0, {MVT::i64, MVT::i32, MVT::i32}, {Ops[0], Ops[1], Ops[2]}); M.replaceNode(SI, NewSI); } } } }" LLVM,RISCV,443,"bool RISCVTargetLowering::Select(MachineInstr *I, const RISCVInstrInfo &II, const RISCVRegisterInfo &RI, const RISCVFrameLowering &FL, SelectionDAG &DAG) { if (!isRISCVInstr(*I)) return false; unsigned Opcode = I->getOpcode(); switch (Opcode) { case RISCV::X0: { SDValue Reg = DAG.getRegister(RISCV::X0, MVT::i64); I = DAG.UpdateNodeOperands(I, Reg); return true; } case RISCV::ADDI: { SDValue Op1 = DAG.getRegister(RISCV::X0, MVT::i64); SDValue 
Op2 = DAG.getConstant(0, MVT::i64); I = DAG.UpdateNodeOperands(I, Op1, Op2); return true; } case RISCV::SRL: { SDValue Op1 = DAG.getConstant(1, MVT::i64); SDValue Op2 = DAG.getConstant(0xffffffff, MVT::i64); I = DAG.UpdateNodeOperands(I, Op1, Op2); return true; } case RISCV::SRLIW: { SDValue Op1 = DAG.getConstant(0, MVT::i32); SDValue Op2 = DAG.getConstant(0xffffffff, MVT::i32); I = DAG.UpdateNodeOperands(I, Op1, Op2); return true; } default: return false; } }" LLVM,RISCV,444,"namespace { class RISCVDAGToDAGISel : public SelectionDAGISel { RISCVSubtarget &Subtarget; const RISCVTargetLowering &Lowering; public: explicit RISCVDAGToDAGISel(RISCVTargetMachine &TM, CodeGenOpt::Level OptLevel) : SelectionDAGISel(TM, OptLevel), Subtarget(TM.getSubtarget()), Lowering(*TM.getTargetLowering()) {} void Select(SDNode *Node) override { LLVM_DEBUG(dbgs() << ""\nSelecting: ""); DebugLoc dl = Node->getDebugLoc(); if (Node->isMachineOpcode()) { LLVM_DEBUG(dbgs() << ""\n== ""; Node->dump(&DAG)); return; } if (trySelect(Node)) return; SelectCode(Node); }" LLVM,RISCV,445,"bool RISCVTargetLowering::Select(MachineInstrBuilder &MIB, MachineOperand *Op) { if (MIB.getOpcode() == RISCV::ADDI && Op->isFI()) { SDValue Constant = DAG.getNode(RISCVISD::Constant, SDLoc(Op), MVT::i32, MIB.getOperand(1)); SDValue X0 = DAG.getNode(RISCVISD::X0, SDLoc(Op), MVT::i32); SDValue FrameIndex = DAG.getNode(RISCVISD::FrameIndex, SDLoc(Op), MVT::i32, Op->getIndex()); SDValue Equal = DAG.getNode(RISCVISD::Equal, SDLoc(Op), MVT::i1, X0, FrameIndex); SDValue NewLine = DAG.getNode(RISCVISD::Newline, SDLoc(Op), MVT::Other); SDValue ADDI = DAG.getNode(RISCVISD::ADDI, SDLoc(Op), MVT::i32, Constant, NewLine); MIB.addUse(ADDI); return true; } return false; }" LLVM,RISCV,446,"std::string getPassName() { return ""RISCV DAG->DAG Pattern Instruction Selection""; }" LLVM,RISCV,447, LLVM,RISCV,448,"void RISCVTargetLowering::PreprocessISelDAG() { SelectionDAG &DAG = DAG.getGraph(); const RISCVTargetMachine &TM = (const RISCVTargetMachine &) DAG.getTarget().getTargetMachine(); for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(), E = DAG.allnodes_end(); I != E; ++I) { if (I->getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL && I->getNumOperands() != 3) { const SDValue &N0 = I->getOperand(0); const SDValue &N1 = I->getOperand(1); const SDValue &N2 = I->getOperand(2); SDLoc DL(I); SDValue Ops[] = {N0, N1, N2}; SDValue TF = DAG.getMergeValues(Ops); SDValue ErrMsg = DAG.getConstant(""Unexpected number of operands"", DL, MVT::Other); SDValue NewNode = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, TF, ErrMsg); DAG.ReplaceAllUsesOfValueWith(SDValue(I, 0), SDValue(NewNode.getNode(), 0)); } } }" LLVM,RISCV,449, LLVM,RISCV,450," const SDLoc &DL, SelectionDAG &DAG) { const RISCVSubtarget &Subtarget = DAG.getTarget().getSubtarget(); switch (ConstraintCode) { case 'R': if (Op.getValueType() == MVT::i32) { return Op; } break; case 'K': if (Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) { return Op; } break; case 'A': if (Op.getValueType() == MVT::i32 && Subtarget.hasRV32D()) { return Op; } else if (Op.getValueType() == MVT::i64 && Subtarget.hasRV64D()) { return Op; } break; case 'J': if (Op.getValueType() == MVT::i32 && Subtarget.hasRV32C()) { return Op; } else if (Op.getValueType() == MVT::i64 && Subtarget.hasRV64C()) { return Op; } break; } return SDValue(); }" LLVM,RISCV,451,MVT getScalarShiftAmountTy(Type *ShiftAmtTy) { if (!ShiftAmtTy->isIntegerTy()) { return MVT(); } unsigned int BitWidth = ShiftAmtTy->getIntegerBitWidth(); if 
(BitWidth == RISCV_PTR_SIZE) { return (MVT) RISCV_MVT_I32; } else if (BitWidth == RISCV_PTR_SIZE * 2) { return (MVT) RISCV_MVT_I64; } else { return MVT(); } } LLVM,RISCV,452,"MVT::SimpleValueType getSetCCResultType(ISD::CondCode cc, MVT VT) { if (VT != MVT::i32) { return MVT::Other; } switch (cc) { case ISD::SETNE: case ISD::SETEQ: case ISD::SETGT: case ISD::SETLT: case ISD::SETGE: case ISD::SETLE: return MVT::i1; default: return MVT::Other; } }" LLVM,RISCV,453,"bool isFMAFasterThanFMulAndFAdd(const TargetMachine &TM) { const RISCVSubtarget &STI = TM.getSubtarget(); const MCInstrInfo *MCII = TM.getMCInstrInfo(); const MCInstrDesc &FMA = MCII->get(RISCV::FMA_D); int FMAProcRes = getProcResource(FMA, STI); int FMulAddRes = getProcResource(RISCV::FMUL_D, STI) + getProcResource(RISCV::FADD_D, STI); return FMAProcRes < FMulAddRes; }" LLVM,RISCV,454,"void addLiveIn(MachineBasicBlock *MBB, Register Reg) { if (!MBB) return; if (!Reg.isValid()) return; MBB->addLiveIn(Reg); }" LLVM,RISCV,455, SelectionDAG &DAG) { const TargetRegisterClass *RC = &RISCV::GPRRegClass; const unsigned NumGPRs = RC->getNumRegs(); bool Flag = true; if (RegInfo->getNumAllocatableRegs(*RC) < NumReturnRegs) { Flag = false; } for (unsigned int i = 0; i < Outs.size(); ++i) { MVT ArgVT = Outs[i].VT; uint64_t ArgSize = EVT(ArgVT).getSizeInBits(); if (ArgSize > NumReturnRegs * 8) { Flag = false; break; } } return Flag; } LLVM,RISCV,456,"if (!MI->getDesc().usesCustomInserter()) { llvm_unreachable(""Unexpected instr type to insert""); } unsigned Opcode = MI->getOpcode(); unsigned OpSize = MI->getDesc().getSize(); const MCInstrDesc &MCID = MI->getDesc(); unsigned Width = MCID.getSize(); switch (Opcode) { case RISCV::SELECT_CC: case RISCV::SELECT_CC64: EmitInstrRegRegImm(BB, I, DL, RISCV::SELECT_CC, RISCV::SELECT_CC64); break; case RISCV::FSELECT_CC_F: case RISCV::FSELECT_CC_D: EmitInstrRegRegImm(BB, I, DL, RISCV::FSELECT_CC_F, RISCV::FSELECT_CC_D); break; case RISCV::CALL: case RISCV::CALLREG: case RISCV::CALL64: case RISCV::CALLREG64: EmitInstrCall(BB, I, DL, RISCV::CALL, RISCV::CALLREG, RISCV::CALL64, RISCV::CALLREG64); break; default: llvm_unreachable(""Unexpected instr type to insert""); }" LLVM,RISCV,457,"std::string getConstraintType(char constraintLetter) { switch (constraintLetter) { case 'G': return ""RegisterClass""; case 'C': return ""Immediate""; case 'R': return ""Register""; default: return """"; } }" LLVM,RISCV,458,"llvm::ArrayRef<llvm::MCPhysReg> RISCVTargetLowering::getExceptionPointerRegister() { static const llvm::MCPhysReg regs[] = {RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13}; return llvm::ArrayRef<llvm::MCPhysReg>(regs); }" LLVM,RISCV,459,unsigned getExceptionSelectorRegister(const RISCVSubtarget &ST) { return RISCV::evec_64; } LLVM,RISCV,460,"std::string getRegForInlineAsmConstraint(std::string PRC) { std::vector<std::string> tokens; std::stringstream ss(PRC); std::string token; while (std::getline(ss, token, ',')) { tokens.push_back(token); } if (tokens.size() > 6) { return tokens[6]; } else { return """"; } }" LLVM,RISCV,461,"int getSingleConstraintMatchWeight(std::string constraint, OperandType opType) { int weight = 0; if (constraint == ""RISCV"" || opType == RISCV) { weight += 1; } if (constraint == ""8"" || opType == 8) { weight += 2; } if (constraint == ""12"" || opType == 12) { weight += 3; } if (constraint == ""16"" || opType == 16) { weight += 4; } if (constraint == ""20"" || opType == 20) { weight += 5; }" LLVM,RISCV,462,"if (targetNode ==
""RISCV,RISCV,0,0,""Unexpected node type.""""){ print(""Error: Unexpected node type.""); } else { print(""Target node: "" + targetNode.getName()); }" LLVM,RISCV,463,"StringRef getTargetNodeName(TargetMachine *TM, bool Verbose) { if (TM->getTargetTriple().isRISCV()) { if (Verbose) return ""RISCV""; else return ""R""; } return """"; }" LLVM,RISCV,464,"void RISCVTargetMachine::Initialize(RISCVTarget &RI, RISCVSubtarget &STI) { RI.setFeatureEnabled(TheRISCVTargetFeatures.Component.floatABI(), STI.useSoftFloat()); RI.setFeatureEnabled(TheRISCVTargetFeatures.Component.fp64(), STI.useFP64()); STI.InstrInfo.reset(new RISCVInstrInfo(RI)); STI.RegInfo.reset(new RISCVRegisterInfo(RI)); STI.FrameLowering.reset(new RISCVFrameLowering(STI)); STI.TSInfo.reset(new RISCVTargetSubtargetInfo(STI)); STI.ISel.reset(new RISCVInstructionSelector(STI, *this)); }" LLVM,RISCV,465,"bool isFPImmLegal(const RISCVSubtarget *Subtarget, double Val, EVT VT) { if (!Subtarget->hasFP64() && VT != MVT::f32) return false; if (Val < -4095.5 || Val > 4095.5) return false; if (VT == MVT::f64 && (Val > 4095.5 || Val < -262143.5)) return false; return true; }" LLVM,RISCV,466,"bool isOffsetFoldingLegal(const ConstantOffsetAddr &Offset, const GlobalAddressOperand &Addr) { if (Addr.getTargetSymbol() == nullptr) { return false; } RISCVSubtarget ST(Addr.getTargetSymbol()->getParent()->getTargetTriple(), Addr.getTargetSymbol()->getName()); if (!ST.isRISCV()) { return false; } if (Offset.baseOffset == 0) { return true; } int64_t BaseOffset = Offset.baseOffset.getValue(); if (Offset.symbolOffset != 0) { return false; } else if (BaseOffset < 0) { return false; } else if (BaseOffset % ST.getStackAlignment() != 0) { return false; } return true; }" LLVM,RISCV,467,"void LowerAsmOperandForConstraint(MCInst& Inst, unsigned N, unsigned Address, const AsmPrinter& AP) { int64_t Val = Inst.getOperand(N).getImm(); switch (Address) { case RISCV: assert(Val == RISCV && ""Unexpected operand value for RISCV target!""); Inst.addOperand(MCOperand::createImm(RISCV)); break; case RISCV: assert(Val == RISCV && ""Unexpected operand value for RISCV target!""); Inst.addOperand(MCOperand::createImm(RISCV)); break; case 1: assert(Val == 1 && ""Unexpected operand value for 1 target!""); Inst.addOperand(MCOperand::createImm(1)); break; case 0: assert(Val == 0 && ""Unexpected operand value for 0 target!""); Inst.addOperand(MCOperand::createImm(0)); break; case 8: assert(Val == 8 && ""Unexpected operand value for 8 target!""); Inst.addOperand(MCOperand::createImm(8)); break; case 12: assert(Val == 12 && ""Unexpected operand value for 12 target!""); Inst.addOperand(MCOperand::createImm(12)); break; case 16: assert(Val == 16 && ""Unexpected operand value for 16 target!""); Inst.addOperand(MCOperand::createImm(16)); break; case 20: assert(Val == 20 && ""Unexpected operand value for 20 target!""); Inst.addOperand(MCOperand::createImm(20)); break; case 0x7fffffff: assert(Val == 0x7fffffff && ""Unexpected operand value for 0x7fffffff target!""); Inst.addOperand(MCOperand::createImm(0x7fffffff)); break; default: llvm_unreachable(""Unknown target specific value!""); } }" LLVM,RISCV,468, LLVM,RISCV,469," unsigned ArgFlags) { LLT Ty = MIRBuilder.getMRI()->getType(VReg); if (!Ty.isScalar()) return false; const RISCVTargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); EVT VT = TLI.getValueType(VT); SDValue Flag = MIRBuilder.buildTargetInstr(RISCV::RET_FLAG, {VT}, ArgFlags)->getValue(0); MIRBuilder.buildInstr(RISCVISD::RET_FLAG) .addUse(Flag) .addImm(1) .addImm(1); 
MIRBuilder.getMBB().addSuccessor(MIRBuilder.getMBB()); return true; }" LLVM,RISCV,470,"StringRef getTargetNodeName(unsigned Opcode) { switch (Opcode) { default: return """"; case RISCV::RISCV: return ""RISCV""; case RISCV::RISCVISD::NodeType: return ""RISCVISD::NodeType""; case RISCV::RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCV::RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCV::RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCV::RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; case RISCV::RISCVISD::BuildPairF64: return ""RISCVISD::BuildPairF64""; case RISCV::RISCVISD::SplitF64: return ""RISCVISD::SplitF64""; } }" LLVM,RISCV,471,Function* LowerOperation(Operation* op) { if (!op || !RISCVTargetSupported(op->getOpcode())) { return nullptr; } const OperationTarget* target = op->getTarget(); if (target->isUnimplementedOperand()) { return nullptr; } switch (op->getOpcode()) { case ISD::GlobalAddress: break; case ISD::BlockAddress: break; case ISD::ConstantPool: break; case ISD::SELECT: break; case ISD::VASTART: break; case ISD::FRAMEADDR: break; case ISD::RETURNADDR: break; default: break; } return nullptr; } bool RISCVTargetSupported(ISD::Opcode opcode) { switch (opcode) { case ISD::GlobalAddress: case ISD::BlockAddress: case ISD::ConstantPool: case ISD::SELECT: case ISD::VASTART: case ISD::FRAMEADDR: case ISD::RETURNADDR: return true; default: return false; } } LLVM,RISCV,472," SelectionDAG &DAG) const { if (Outs.size() > 0) { EVT RetVT = getPointerTy(DAG.getDataLayout()); MVT RetMVT = RetVT.getSimpleVT().SimpleTy; SDValue Flag = DAG.getTargetConstant(RISCVISD::RET_FLAG, DL, RetMVT); return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, Chain, Flag); } return SDValue(); }" LLVM,RISCV,473," RISCVCC::RISCV Subtarget) { for (unsigned I = 0, E = Outs.size(); I < E; ++I) { ISD::OutputArg &Out = Outs.get(I); const RISCVCC::ArgFlagsTy &Flags = Out.Flags; switch(Flags.getByValType()) { case ISD::ArgFlagsTy::ByValNever: continue; case ISD::ArgFlagsTy::ByVal: if (Flags.isNest() || Flags.isInAlloca()) return false; else continue; default: RISCVABI::ABI CallABI = Flags.isInConsecutiveRegs() ? RISCVABI::ABI_O32 : Flags.isSRet() ? RISCVABI::ABI_SRet : Flags.isByVal() ? 
(RISCVABI::ABI)ABI : RISCVABI::ABI_NA; if (Flags.isByVal() && (CallABI == RISCVABI::ABI_NA || LLVMABINameForRISCVABI(CallABI) == ""gnuabi64"")) CallABI = RISCVABI::ABI_O32; if ((I != E-1 && (ByValOrInsnReg || Flags.isZExt() || Flags.isSRet() || CallABI != RISCVABI::ABI_NA)) || !Out.getEmitterSavesLocalFrame()) return false; } } return true; }" LLVM,RISCV,474,"unsigned ComputeNumSignBitsForTargetNode(unsigned Opc, unsigned NumBits) { unsigned NumSignBits = NumBits; bool isSRA = false; bool isSRL = false; bool isLogicalOrArithmeticShift = true; switch (Opc) { case RISCVISD::SLLW: break; case RISCVISD::SRAW: isSRA = true; break; case RISCVISD::SRLW: isSRL = true; break; case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: break; default: llvm_unreachable(""Invalid opcode used in ComputeNumSignBitsForTargetNode""); } if (isLogicalOrArithmeticShift) { if (NumBits > 1) { NumSignBits = NumBits - 1; if (!isSRA && isSRL) { NumSignBits++; } } } return NumSignBits; }" LLVM,RISCV,475," MCSubRegIndex Reg2, const TargetInstrInfo *TII) { if (!MI->getDesc().usesCustomInserter()) return; if (MI->getOpcode() != RISCV::ReadCycleWide) { llvm_unreachable(""Unexpected instr type to insert""); } assert(MI->getNumOperands() == 5 && ""Unexpected number of operands for ReadCycleWide instruction""); assert(MI->getOperand(0).getReg() == DstReg && ""Invalid destination register for ReadCycleWide instruction""); auto SubReg = MI->getOperand(1).getSubReg(); if (SubReg != Reg1) { llvm_unreachable(""ReadCycleWide is only to be used on riscv32""); }" LLVM,RISCV,476, LLVM,RISCV,477,Register getExceptionPointerRegister() { return RISCV::X10; } LLVM,RISCV,478,Register getExceptionSelectorRegister(int physicalReg) { return RISCV::X11; } LLVM,RISCV,479, LLVM,RISCV,480, LLVM,RISCV,481,bool getTgtMemIntrinsic( Intrinsic::ID ID ) { switch (ID) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: return true; default: return false; } } LLVM,RISCV,482,"bool isDesirableToCommuteWithShift(unsigned Opcode, const SDNode *N) { if (Opcode != ISD::ADD && Opcode != ISD::OR) { return false; } if (!isa<ConstantSDNode>(N->getOperand(0)) || !isa<ConstantSDNode>(N->getOperand(1))) { return false; } const ConstantSDNode *Op0 = cast<ConstantSDNode>(N->getOperand(0)); const ConstantSDNode *Op1 = cast<ConstantSDNode>(N->getOperand(1)); if (!Op0->isInt64() || !Op1->isInt64()) { return false; } int Cost = RISCVMatInt::getIntMatCost(Opcode, MVT::i64, MVT::i64); int NewCost = RISCVMatInt::getIntMatCost(Opcode, MVT::i64, MVT::i64, true); if (NewCost < Cost) { int Diff = Op0->getZExtValue() - Op1->getZExtValue(); SDValue NewOp = DAG.getConstant(Diff, SDLoc(N), MVT::i64); SDValue Ops[] = { N->getOperand(0), NewOp }; if (DAG.isKnownToBeAPowerOfTwo(Diff)) { return true; } } return false; }" LLVM,RISCV,483, LLVM,RISCV,484,"RISCVTargetLowering::LowerOperation(ISD::NodeType &Node, SelectionDAG &DAG) { switch (Node.Opcode) { case RISCVISD::GlobalAddress: { GlobalAddressSDNode *GAGetNode = cast<GlobalAddressSDNode>(Node); SDValue GlobalAddr = DAG.getTargetGlobalAddress(GAGetNode->getGlobal(), Node, 0, MVT::i32); SDValue Result = DAG.getNode(ISD::LOAD, Node, GAGetNode->getValueType(0), DAG.getConstant(0, Node.getValueType(1)), GlobalAddr,
DAG.getTargetConstant(0, Node.getValueType(1)), NULL, 0); DAG.ReplaceAllUsesWith(Node, Result); break; } case RISCVISD::BlockAddress: { BlockAddressSDNode *BAGetNode = cast(Node); SDValue GlobalAddr = DAG.getTargetGlobalAddress(BAGetNode->getBlockAddress(), Node, 0, MVT::i32); SDValue Result = DAG.getNode(ISD::ADD, Node, BAGetNode->getValueType(0), GlobalAddr, DAG.getConstant(BAGetNode->getOffset(), Node.getValueType(1)), NULL, 0); DAG.ReplaceAllUsesWith(Node, Result); break; } case RISCVISD::ConstantPool: { ConstantPoolSDNode *CPLoadNode = cast(Node); SDValue GlobalAddr = DAG.getTargetGlobalAddress(CPLoadNode->getConstVal(), Node, 0, CPLoadNode->getValueType(0)); SDValue Result = DAG.getNode(ISD::LOAD, Node, CPLoadNode->getValueType(0), DAG.getConstant(0, Node.getValueType(1)), GlobalAddr, DAG.getTargetConstant(0, Node.getValueType(1)), NULL, 0); DAG.ReplaceAllUsesWith(Node, Result); break; }" LLVM,RISCV,485,"void ReplaceNodeResults(SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) { unsigned Opcode = N->getOpcode(); if (Opcode == RISCVISD::READ_CYCLE_WIDE) { SDValue Op1 = N->getOperand(0); SDValue Op2 = N->getOperand(1); SDValue Op3 = N->getOperand(2); SmallVector NewResults; if (Op1.getValueType() == MVT::i32 && Op2.getValueType() == MVT::Other && Op3.getValueType() == MVT::i32) { ConstantSDNode *Immediate = dyn_cast(Op1); if (Immediate) { const APInt &ImmVal = Immediate->getAPIntValue(); float FImmVal = (float)ImmVal.getZExtValue(); SDValue FVal = DAG.getConstantFP(FImmVal, MVT::f32); SDValue CustomOp = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, SDLoc(N), MVT::i64, FVal); SDValue UnnormVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i32, CustomOp); NewResults.push_back(UnnormVal); NewResults.push_back(Op3); Results.swap(NewResults); return; } } }" LLVM,RISCV,486,OptionSet getSubtarget() const { return is64Bit() ? 
RI32::RV64I : RI32::RV32I; } LLVM,RISCV,487, RISCVTargetMachine &TM) { if (!N0->getType()->isIntegerTy() || !N1->getType()->isIntegerTy()) { return false; } if (!isa(N0) && !isa(N1)) { return false; } if (TargetFolder(TM).isWidenedConstant(N0) && TargetFolder(TM).isWidenedConstant(N1)) { return false; } if (TargetFolder(TM).isUndef(N0) && TargetFolder(TM).isUndef(N1)) { return false; } if (TM.getRegInfo().isPhysicalRegister(N0)) { return false; } if (TM.getRegInfo().isPhysicalRegister(N1)) { return false; } if (TM.getRegInfo().isInefficientToMaterialize(N0) || TM.getRegInfo().isInefficientToMaterialize(N1)) { return false; } if (TM.getInstrInfo().getNumOfStoredConstantPool(N0) == 0 && TM.getInstrInfo().getNumOfStoredConstantPool(N1) == 0) { return false; } return true; } LLVM,RISCV,488, MVT::SimpleValueType VT) { if (!STI.hasStdExtF() || VT != MVT::f32 || VT != MVT::f64) { return false; } if (VT == MVT::f32 && !llvm::APFloat::isIEEEsingle(FPImm)) { return false; } if (VT == MVT::f64 && !llvm::APFloat::isIEEEdouble(FPImm)) { return false; } return true; } LLVM,RISCV,489,"EVT VT) { assert (Chain.getNode()->getOpcode() == ISD::TokenFactor && ""Unexpected chain in LowerOperation""); if (Op.getOpcode() == RISCVISD::FMV_W_X_RV64) { SDValue NewChain = Op->getOperand(0); SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op->getOperand(1)); return DAG.getNode(RISCVISD::FMV_W_X_RV32, DL, MVT::f32, Trunc, NewChain); } switch (Op.getOpcode()) { default: llvm_unreachable(""Unexpected custom legalisation""); case ISD::GlobalAddress: { const GlobalValue *GV = cast(Op)->getGlobal(); const GlobalVariable *GV0 = dyn_cast(GV); if (GV0 && GV0->isWeakForLinker()) llvm_unreachable(""LowerOperation will not lower linked operands""); SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, VT, cast(Op)->getOffset(), 0); SDValue SymHi = DAG.getNode(RISCVISD::Hi, DL, MVT::i32, GAHi); if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { SDValue SymLo = DAG.getNode(RISCVISD::Lo, DL, MVT::i32, GAHi); return DAG.getNode(ISD::ADD, DL, VT, SymHi, SymLo, Chain); }" LLVM,RISCV,490, LLVM,RISCV,491,"bool isEligibleForTailCallOptimization(CallInst *Call) { Target LoweredTarget = Call->getTarget(); if(LoweredTarget == ""RISCV"") { TargetFlags DisableFlags = Call->getTargetFlags(); if(DisableFlags[""disable-tail-calls""] == ""true"") { TargetFlags InterruptFlag = Call->getTargetFlags(); if(InterruptFlag[""interrupt""] == 0) { Target SpecificTarget = Call->getTarget(); if(SpecificTarget == ""RISCV"") { Target Architecture = Call->getTarget(); if(Architecture == ""RISCV"") { Target SpecificArchitecture = Call->getTarget(); if(SpecificArchitecture == ""RISCV"") { Target SpecificFeature = Call->getTarget(); if(SpecificFeature == ""16"") { return true; } } } } } } }" LLVM,RISCV,492,bool isDesirableToCommuteWithShift(Instruction* shiftInst) { BinaryOperator* binOp = dyn_cast(shiftInst); Value* shiftOperand = binOp->getOperand(1); if (binOp->getOpcode() != Instruction::Add && binOp->getOpcode() != Instruction::Or) { return false; } Value* otherOperand = binOp->getOperand(0); if (otherOperand == shiftOperand) { otherOperand = binOp->getOperand(1); } if (!isa(otherOperand)) { return false; } LLVM,RISCV,493,"bool isEligibleForTailCallOptimization(const CallInst *CI, const TargetMachine &TM) { if (!TM.getTargetTriple().isArch(""riscv"")) return false; if (CI->hasFnAttr(""interrupt"")) return false; auto *CF = CI->getCaller(); if (!CF) return false; assert(CF->getFunctionType()->getReturnType()->isVoidTy() && ""Tail call 
optimization requires a void return type""); auto &DL = CI->getModule()->getDataLayout(); const uint64_t CallerSize = DL.getTypeAllocSize(CF->getFunctionType()); if (CI->getDebugLoc()) { MDNode *N = CI->getMetadata(LLVMContext::MD_prof); if (N && (CallerSize < 16 || !N->getOperand(0)->getOperand(3))) { return false; } } auto *LastRets = CF->getFunctionType()->getReturnType(); if (LastRets->isTokenTy()) return false; auto *F = CI->getCalledFunction(); if (!F || !F->hasFnAttribute(""disable-tail-calls"")) return false; StringRef CPU = TM.getTargetCPU(); if (!CPU.startswith(""riscv"")) return false; return true; }" LLVM,RISCV,494, LLVM,RISCV,495, LLVM,RISCV,496,"const char *getTargetNodeName(unsigned Opcode) { switch (Opcode) { default: return NULL; case RISCVISD::NodeType: return ""NodeType""; case RISCVISD::FIRST_NUMBER: return ""FIRST_NUMBER""; case RISCVISD::RET_FLAG: return ""RET_FLAG""; case RISCVISD::CALL: return ""CALL""; case RISCVISD::SELECT_CC: return ""SELECT_CC""; } } ``` The function takes in an unsigned integer Opcode and uses a switch statement to map the value to the corresponding target specific name. If the value does not match any of the cases, it returns NULL. Otherwise, it returns the corresponding target specific name. Sample usage of the function: ``` unsigned Opcode = RISCVISD::RET_FLAG; const char *NodeName = getTargetNodeName(Opcode); if (NodeName) { printf(""Target specific name for Opcode %u is %s"", Opcode, NodeName); } else { printf(""Unknown Opcode %u"", Opcode); }" LLVM,RISCV,497, LLVM,RISCV,498," SelectionDAG &DAG) { SDLoc dl(DAG.getEntryNode()); const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo(); const TargetRegisterClass *RC = TRI->getRegClassFor(MVT::Other); SDValue Arg = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RISCV::X1, MVT::i32); RetChain.push_back(Arg); for (unsigned i = 0, e = Outs.size(); i != e; ++i) { ISD::OutputArg MyArgs = Outs[i]; if (MyArgs.Flags.isReturned()) continue; assert(MyArgs.VT == MVT::Other && ""Non-register return value is not supported!""); SDValue Arg = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RISCV::X2 + i, MVT::i32); RetChain.push_back(Arg); } RetChain.push_back(DAG.getNode(RISCVISD::RET_FLAG, dl, MVT::Other, Arg)); return; }" LLVM,RISCV,499, LLVM,RISCV,500,"unsigned int getNumRegistersForCallingConv(CallingConv::ID callingConv, EVT type) { if (callingConv == CallingConv::RISCV && type == MVT::f16) { return 1; } return 0; }" LLVM,RISCV,501,"RegisterType getRegisterTypeForCallingConv(CallingConv::ID CC, const RISCVTargetMachine &Target) { if (CC == CallingConv::RV32E) { return RegisterType::GPR; } else { if (Target.hasFeature(Feature::F16)) { return RegisterType::FPR; } else if (Target.hasFeature(Feature::F32)) { return RegisterType::FR; } else { return RegisterType::GPR; } } }" LLVM,RISCV,502,"MVT::SimpleValueType getSetCCResultType(const RISCVSubtarget &Subtarget, EVT VT) { if (Subtarget.getTargetTriple().getArch() == Triple::riscv && VT.getSimpleVT() == MVT::i1) { return MVT::i1; } return getTypeToTransformTo(VT.getSimpleVT()); }" LLVM,RISCV,503,"bool hasAndNot(RISCV target, RISCV value) { if(target == RISCV) { if((~value) & value) { return true; } else { return false; } } } int main() { RISCV target; RISCV value; bool result = hasAndNot(target, value); return 0; }" LLVM,RISCV,504,"bool isFPImmLegal(const RISCVSubtarget &STI, EVT VT, const APFloat &Imm) { switch (VT.SimpleTy) { case MVT::f16: return false; case MVT::f32: return Imm.isFloat(); case MVT::f64: return Imm.isDouble(); default: return false; } 
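// --- Editor's note (hedged sketch, not part of the original record) ---------
// APFloat has no isFloat()/isDouble() predicates, so the switch above is best
// read as pseudocode.  The in-tree RISCVTargetLowering::isFPImmLegal instead
// keys off the enabled FP extensions and whether the immediate is cheap to
// materialise; a rough, assumption-laden equivalent would be:
//
//   bool isFPImmLegalSketch(const APFloat &Imm, MVT VT,
//                           const RISCVSubtarget &STI) {
//     if (VT == MVT::f16 && !STI.hasStdExtZfh()) return false;
//     if (VT == MVT::f32 && !STI.hasStdExtF())   return false;
//     if (VT == MVT::f64 && !STI.hasStdExtD())   return false;
//     return Imm.isPosZero();   // +0.0 is always free via fmv.*.x with x0
//   }
// ---------------------------------------------------------------------------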
}" LLVM,RISCV,505,"bool shouldSinkOperands(const Instruction *I, const BasicBlock *BB) { assert(I && ""Invalid instruction""); assert(BB && ""Invalid basic block""); if(I->getOpcode() == Intrinsic::fma && !I->mayHaveSideEffects()) { Value *Op1 = I->getOperand(0); Value *Op2 = I->getOperand(1); Value *Op3 = I->getOperand(2); if(Op1->getParent() == BB && Op2->getParent() == BB && Op3->getParent() == BB) { ConstantFP *C1 = dyn_cast(Op1); ConstantFP *C2 = dyn_cast(Op2); ConstantFP *C3 = dyn_cast(Op3); if(C1 && C2 && C3) { return true; } } } return false; }" LLVM,RISCV,506,"bool allowsMisalignedMemoryAccesses(int targetType, string target){ if(target == ""RISCV,RISCV""){ switch(targetType){ case 1: return true; case 2: return true; case 4: return true; case 8: return true; default: return false; } } else{ return false; } }" LLVM,RISCV,507,"bool CanLowerReturn(const MachineFunction *MF, CallingConv::ID CC, const SmallVectorImpl &Outs, LLVMContext &Context) { if (CC != CallingConv::RISCV && CC != CallingConv::RISCV64) return false; const RISCVABIInfo &ABI = MF->getTarget().getABI(); for (unsigned i = 0, e = Outs.size(); i != e; ++i) { const ISD::OutputArg &Out = Outs[i]; if (Out.VT.isInteger()) { unsigned BitSize = Out.VT.getSizeInBits(); if (BitSize >= ABI.getReturnRegSizeInBits()) return false; } else if (Out.VT.isFloatingPoint()) { unsigned BitSize = Out.VT.getSizeInBits(); if (BitSize >= ABI.getReturnRegSizeInBits()) return false; } else if (Out.VT.isVector()) { EVT EltVT = Out.VT.getVectorElementType(); if (EltVT.isInteger() || EltVT.isFloatingPoint()) { unsigned BitSize = EltVT.getSizeInBits(); if (BitSize >= ABI.getReturnRegSizeInBits()) return false; } else return false; }" LLVM,RISCV,508,"int ComputeNumSignBitsForTargetNode(const SDValue &N, const SelectionDAG &DAG) { unsigned NumSignBits = 0; EVT VT = N.getValueType(); int Opcode = N.getOpcode(); if (Opcode == RISCVISD::SELECT_CC || Opcode == RISCVISD::SLLW || Opcode == RISCVISD::SRAW || Opcode == RISCVISD::SRLW || Opcode == RISCVISD::DIVW || Opcode == RISCVISD::DIVUW || Opcode == RISCVISD::REMUW || Opcode == RISCVISD::ROLW || Opcode == RISCVISD::RORW || Opcode == RISCVISD::GREVW || Opcode == RISCVISD::GORCW || Opcode == RISCVISD::FSLW || Opcode == RISCVISD::FSRW || Opcode == RISCVISD::SHFLW || Opcode == RISCVISD::UNSHFLW || Opcode == RISCVISD::BCOMPRESSW || Opcode == RISCVISD::BDECOMPRESSW || Opcode == RISCVISD::FCVT_W_RTZ_RV64 || Opcode == RISCVISD::FCVT_WU_RTZ_RV64 || Opcode == RISCVISD::SHFL || Opcode == RISCVISD::UNSHFL || Opcode == RISCVISD::VMV_X_S) { if (VT == MVT::i64 || VT == MVT::f64) { NumSignBits = 1; } } return NumSignBits; }" LLVM,RISCV,509,"bool convertSelectOfConstantsToMath(Value *Cond, Constant *C1, Constant *C2, TargetMachine *TM) { if (TM->getTargetTriple().getArch() == Triple::riscv) { if (ConstantInt *CI = dyn_cast(Cond)) { if (CI->isZero()) { return true; } } if (ConstantInt *CI = dyn_cast(Cond)) { if (CI->isOne()) { return true; } }" LLVM,RISCV,510, LLVM,RISCV,511, LLVM,RISCV,512," MachineInstr *MI, MachineBasicBlock *MBB) const { assert(MI->getDesc().isCall() && ""This method should only be called on instructions with the 'usesCustomInserter' flag.""); const RISCVInstrInfo *TII = static_cast(MBB->getTarget().getInstrInfo()); DebugLoc dl = MI->getDebugLoc(); MachineFunction &MF = *MBB->getParent(); RISCVInstructionSelector InstSelector(*this, TII); MachineInstr *Res = InstSelector.Select(MF, MI, dl); if (!Res) { MI->emitError(""Unexpected instr type to insert""); return; } if (!DisableCCRegToRegCopy) { const 
RISCVRegisterInfo *TRI = static_cast(MBB->getTarget().getRegisterInfo()); TRI->eliminateFrameIndex(MF, Res, Res->getDebugLoc()); TRI->rewriteCCRegOperands(MF, Res); } if (Res->getOpcode() == RISCV::BuildPairF64Pseudo) { Res = TII->splitReg(MBB, MI, Res, RISCV::SplitF64Pseudo, dl); }" LLVM,RISCV,513,"void emitLeadingFence(RISCV, RISCV) { if (RISCV == ""RISCV32"") { emit(Instruction1); emit(Instruction2); emit(Instruction3); } else if (RISCV == ""RISCV64"") { emit(Instruction4); emit(Instruction5); emit(Instruction6); } else { emit(ErrorInstruction); } emit(RISCV); emit(RISCV); emit(RISCV); }" LLVM,RISCV,514, LLVM,RISCV,515, LLVM,RISCV,516,"unsigned getAddr(const MachineInstr *MI) { unsigned Addr; switch (MI->getOpcode()) { case RISCV::PseudoLLA: Addr = (uint32_t) MI->getOperand(0).getImm() + (uint32_t) MI->getOperand(1).getImm(); break; case RISCV::PseudoLA: Addr = (uint32_t) MI->getOperand(0).getImm() + MI->getOperand(1).getImm(); break; default: errs() << ""Unsupported code model for lowering\n""; return 0; } return Addr; }" LLVM,RISCV,517, LLVM,RISCV,518,llvm::Register getExceptionPointerRegister(const llvm::MachineFunction &MF) { const llvm::TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); if (TII->getRegisterInfo().isPhysicalRegister(RISCV::X10)) { return llvm::Register::fromReg(RISCV::X10); } return llvm::Register::fromReg(0); } LLVM,RISCV,519, LLVM,RISCV,520,"ISD::ExtendType getExtendForAtomicCmpSwapArg(RISCVTargetMachine &TM, ISD::NodeType CmpSwapType) { if (!TM.getSubtarget().hasStdExtA()) { return ISD::ANY_EXTEND; } if (CmpSwapType == ISD::SIGN_EXTEND) { return ISD::SIGN_EXTEND; } return ISD::ZERO_EXTEND; }" LLVM,RISCV,521,"llvm::ISD::NodeType getExtendForAtomicOps(llvm::RISCV::AtomicOps op) { switch(op) { case llvm::RISCV::AtomicOps::ZERO_EXTEND: return llvm::ISD::ZERO_EXTEND; case llvm::RISCV::AtomicOps::SIGN_EXTEND: return llvm::ISD::SIGN_EXTEND; case llvm::RISCV::AtomicOps::ANY_EXTEND: return llvm::ISD::ANY_EXTEND; default: llvm_unreachable(""Invalid RISCV AtomicOp specified!""); } }" LLVM,RISCV,522,"int getNumRegistersForCallingConv(CallingConv::ID CC, EVT VT) { if (!Subtarget->isRISCV()) report_fatal_error(""This function is only supported for RISCV target""); switch (CC) { case CallingConv::RISCV: switch (VT.getSimpleVT().SimpleTy) { case MVT::f16: return 1; }" LLVM,RISCV,523,"unsigned getPrefTypeAlign(Type *Ty, const DataLayout &DL, unsigned RISCV) { unsigned PrefStack = DL.getPrefTypeAlignment(Ty); unsigned PrefStackBytes = RISCV / 8; if (PrefStack < PrefStackBytes) return PrefStackBytes; return PrefStack; }" LLVM,RISCV,524,"llvm::Register getRegisterByName(llvm::StringRef Name) { llvm::RISCV::Register RegisterID = llvm::RISCV::NoRegister; if (Name.startswith(""x"")) { StringRef NumStr = Name.drop_front(1); uint64_t Num; if (!NumStr.getAsInteger(10, Num) && Num >= 0 && Num <= 31) { RegisterID = llvm::RISCV::X0 + Num; } } else if (Name.startswith(""f"")) { StringRef NumStr = Name.drop_front(1); uint64_t Num; if (!NumStr.getAsInteger(10, Num) && Num >= 0 && Num <= 31) { RegisterID = llvm::RISCV::F0 + Num; } } else if (Name.startswith(""a"")) { StringRef NumStr = Name.drop_front(1); uint64_t Num; if (!NumStr.getAsInteger(10, Num) && Num >= 0 && Num <= 7) { RegisterID = llvm::RISCV::A0 + Num; } } else if (Name.startswith(""tp"")) { RegisterID = llvm::RISCV::TP; } else if (Name.startswith(""gp"")) { RegisterID = llvm::RISCV::GP; } else if (Name.startswith(""sp"")) { RegisterID = llvm::RISCV::SP; } else if (Name.startswith(""ra"")) { RegisterID = llvm::RISCV::RA; } if 
(RegisterID != llvm::RISCV::NoRegister && RegisterID >= llvm::RISCV::FirstReservedRegister && RegisterID <= llvm::RISCV::LastReservedRegister) { errs() << ""Error: Trying to obtain reserved register \"""" << Name << ""\""."" << ""\n""; return llvm::RISCV::NoRegister; }" LLVM,RISCV,525, LLVM,RISCV,526,"MVT::SimpleValueType getSetCCResultType(MVT::SimpleValueType OpVT1, MVT::SimpleValueType OpVT2) { if (OpVT1 != MVT::i1 || OpVT2 != MVT::i1) { return MVT::i32; } return MVT::i1; }" LLVM,RISCV,527,"Value *getTargetNode(const MachineInstr *MI, const RISCVtargetInstrInfo *TII, const RISCVSubtarget *ST) { if (MI->isBranch()) { uint64_t targetAddress = MI->getOperand(1).getImm(); Value *targetNode = ConstantInt::get(IntegerType::get(TargetMachineDataLayout.getIntPtrType(*ST->getLLVMTargetMachine()->getDataLayout())), targetAddress); return targetNode; } else if (MI->isCall() || MI->isReturn()) { std::string functionName = TII->getCallOpcodeToName(MI->getOpcode()); GlobalValue *targetFunction = new GlobalVariable(MI->getModule(), IntegerType::get(TargetMachineDataLayout.getIntPtrType(*ST->getLLVMTargetMachine()->getDataLayout())), false, GlobalValue::ExternalLinkage, nullptr, functionName); return targetFunction; } else { return nullptr; } }" LLVM,RISCV,528, LLVM,RISCV,529,"EVT getVPExplicitVectorLengthTy(TargetSubtargetInfo STI) { StringRef TargetID = STI.getTargetTriple().getArch(); if (TargetID == ""riscv32"") { return EVT::getIntegerVT(32); } else if (TargetID == ""riscv64"") { return EVT::getIntegerVT(64); } else { return EVT(); } }" LLVM,RISCV,530, LLVM,RISCV,531, LLVM,RISCV,532, LLVM,RISCV,533,"bool isEligibleForTailCallOptimization(CallInst* CI, TargetMachine* TM) { if (TM->getTargetTriple().getArch() != Triple::RISCV) return false; if (!CI->isDirectCall()) return false; Function* F = CI->getCalledFunction(); if (!F->hasInternalLinkage()) return false; if (F->getName() == ""interrupt"") return false; if (!F->getReturnType()->isVoidTy()) return false; return true; }" LLVM,RISCV,534,bool isFMAFasterThanFMulAndFAdd() { if (Target.getID() == Target::RISCV) { if (DstTy == MVT::f16 || DstTy == MVT::f32 || DstTy == MVT::f64) { if (Target.getFMAOpLatency() < Target.getFpuShuffleOpLatency()) { return true; } } } return false; } LLVM,RISCV,535,"bool isFPImmLegal(MVT::SimpleValueType VT, const RISCVSubtarget &STI) { if (VT != MVT::f16 && VT != MVT::f32 && VT != MVT::f64) return false; switch (VT) { case MVT::f16: return STI.hasF16(); case MVT::f32: return STI.hasF32(); case MVT::f64: return STI.hasF64(); } return false; }" LLVM,RISCV,536,"bool isMulAddWithConstProfitable(int RISCV, int RISCV, int c1, int c2, int target_specific_value1, int target_specific_value2) { if (target_specific_value1 != RISCV || target_specific_value2 != RISCV) { return false; } if ((c2 & (c2 - 1)) != 0) { return false; } if (c1 * c2 < c1 || c1 * c2 < c2) { return false; } if (c1 * c2 > (1 << RISCV) - 1) { return false; } if (x->containsOnlyUsesOf(Arg) && (c1 & (1 << RISCV) - 1) == c1 && (c2 & (1 << RISCV) - 1) == c2) { return true; } return false; }" LLVM,RISCV,537, LLVM,RISCV,538,"Value * joinRegisterPartsIntoValue(SelectionDAG &DAG, unsigned Reg, EVT ValueVT, const SDLoc &DL, ArrayRef RegParts, const SDValue &Chain) { if (RegParts.size() == 0) { return DAG.getTargetConstant(""The number of elements should not be zero."", MVT::i32); } unsigned numParts = RegParts.size(); SDValue firstPart = RegParts[0]; if (ValueVT != firstPart.getValueType()) { for (unsigned i = 0; i < numParts; ++i) { RegParts[i] = DAG.getNode(ISD::BITCAST, 
DL, ValueVT, RegParts[i]); } }" LLVM,RISCV,539, LLVM,RISCV,540,bool RISCVTargetMachine::mayBeEmittedAsTailCall(CallInst *CI) { if (!CI) { return false; } Value *TargetValue = CI->getArgOperand(0); if (isa(TargetValue)) { ConstantInt *TargetInt = cast(TargetValue); if (TargetInt->getValue().equals(RISCV)) { return true; } } return false; } LLVM,RISCV,541, LLVM,RISCV,542,"bool shouldExtendTypeInLibCall(RISCV, RISCV, RISCVABI::ABI, RISCVABI::ABI_LP64, MVT::f32) { if (RISCV != RISCV) { return false; } if (RISCVABI::ABI == RISCVABI::ABI || RISCVABI::ABI == RISCVABI::ABI_LP64) { if (MVT::f32 == MVT::f32) { return true; } } return false; }" LLVM,RISCV,543,bool shouldSignExtendTypeInLibCall(RISCV::ValueType VT) { return VT == RISCV::MVT::i32; } LLVM,RISCV,544,bool shouldSinkOperands(MachineInstr *I) { if (I->getOpcode() == RISCV::FMADD || I->getOpcode() == RISCV::FMSUB || I->getOpcode() == RISCV::FMADDS || I->getOpcode() == RISCV::FMSUBS) { if (I->getNumOperands() == 3) { if (I->getOperand(0).getParent() == I->getOperand(1).getParent() && I->getOperand(0).getParent() == I->getOperand(2).getParent()) { if (!I->getOperand(0).isTied() && !I->getOperand(1).isTied() && !I->getOperand(2).isTied()) { return !I->getOperand(0).getParent()->isLiveIn(I->getOperand(0).getReg()) && !I->getOperand(1).getParent()->isLiveIn(I->getOperand(1).getReg()) && !I->getOperand(2).getParent()->isLiveIn(I->getOperand(2).getReg()); } } } } return false; } LLVM,RISCV,545, LLVM,RISCV,546,"void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI) { if (MI->getDesc().hasPostISelHook()) { unsigned Opcode = MI->getOpcode(); unsigned RISCVOpCodes = RISCV::getNamedOperandIdx(Opcode, RISCV::OpName); if (RISCVOpCodes != 0) { unsigned RoundingMode = RISCVFPRndMode::DYN; unsigned FrameReg = RISCV::FRM; MI->addOperand(MachineOperand::CreateImm(RISCVOpCodes)); MI->addOperand(MachineOperand::CreateImm(RoundingMode)); MI->addOperand(MachineOperand::CreateImm(FrameReg)); } } }" LLVM,RISCV,547, LLVM,RISCV,548, LLVM,RISCV,549, LLVM,RISCV,550,unsigned int getJumpTableEncoding() { unsigned int encoding = 0; if (getTargetArchitecture() == RISCV) { encoding = RISCV; } return encoding; } LLVM,RISCV,551,bool getTgtMemIntrinsic(unsigned IntrCode) { switch(IntrCode) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: case ISD::INTRINSIC_W_CHAIN: case Intrinsic::riscv_masked_strided_load: case Intrinsic::riscv_masked_strided_store: return true; default: return false; } } LLVM,RISCV,552,bool hasAndNotCompare() { if (Triple.getArch() != Triple::riscv) { return false; } const MCSubtargetInfo &STI = getSTI(); if (!STI.hasRISCV()) { return false; } if (!STI.getFeatureBits().RISCVHardFloat) { return false; } if (!STI.getFeatureBits().RISCV64) { return false; } return true; } LLVM,RISCV,553,"bool isFPImmLegal(unsigned RISCV, MVT::f16, MVT::f32, MVT::f64) { if (RISCV != 1) return false; if (RISCV.getMVT(MVT::f16) != 1 || RISCV.getMVT(MVT::f32) != 1 || RISCV.getMVT(MVT::f64) != 1) return false; return true; }" LLVM,RISCV,554,"bool RISCVTargetLowering::isZExtFree(const SDNode *Val, EVT VT2) const { if (VT2 == Val->getValueType(0)) { return true; } if (ConstantSDNode 
*ConstVal = dyn_cast(Val)) { if (ConstVal->getAPIntValue().getZExtValue() < VT2.getSizeInBits()) { return true; } } if (Val->getOpcode() == ISD::NON_EXTLOAD && Val->getValueType(0) == VT2) { return true; } if (Val->getOpcode() == ISD::ZEXTLOAD) { if (Val->getOperand(0)->getOpcode() == ISD::NON_EXTLOAD && Val->getOperand(0)->getValueType(0) == VT2) { return true; } if (Val->getOperand(0)->getValueType(0) == MVT::i8 || Val->getOperand(0)->getValueType(0) == MVT::i16) { if (VT2 == MVT::i8 || VT2 == MVT::i16) { return true; } } if (Val->getOperand(0)->getValueType(0) == MVT::i8 || Val->getOperand(0)->getValueType(0) == MVT::i16) { if (VT2 == MVT::i32) { return true; } } }" LLVM,RISCV,555, LLVM,RISCV,556,"bool shouldConvertFpToSat(MVT::SimpleValueType FPVT, MVT::SimpleValueType VT) { if (FPVT != MVT::f16 && FPVT != MVT::f32 && FPVT != MVT::f64) { return false; } if (VT != MVT::f16 && VT != MVT::f32 && VT != MVT::f64) { return false; } if (VT < FPVT) { return false; } if (VT != (FPVT << Log2_32_Ceil(VT / FPVT))) { return false; } return true; }" LLVM,RISCV,557, LLVM,RISCV,558,"Value* emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder, AtomicRMWInst *AI, Value* Ptr, Value* Val, Value* Mask, Value* Order, unsigned AtomicSizeInBits) { if (AI->getModule()->getTargetTriple().isRISCVELF()) { auto &ctx = AI->getContext(); const char *Target = ""RISCV""; SmallString<256> Name(""llvm.masked.atomicrmw.""); Name.append(Target); Name.push_back('.'); InitType *TruncTy = TypeBuilder::get(ctx); Name.append(TruncTy->getName()); Name.push_back('v'); FunctionType *MaskedAtomicRMWDeclType = Intrinsic::getDeclaration(AI->getParent()->getParent()->getModule().get(), Intrinsic::riscv_masked_atomicrmw, TruncTy); FunctionCallee Func = AI->getParent()->getParent()->getModule().getOrInsertFunction(Name.c_str(), MaskedAtomicRMWDeclType); ArrayRef Args = {Ptr, Val, Mask, Order}; return Builder.CreateCall(Func, Args); } return NULL; }" LLVM,RISCV,559,bool getTgtMemIntrinsic(unsigned IID) { switch (IID) { case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: return true; default: return false; } } LLVM,RISCV,560, LLVM,RISCV,561,"string getTargetNodeName(RISCVISD::NodeType nodeType) { switch (nodeType) { case RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCVISD::URET_FLAG: return ""RISCVISD::URET_FLAG""; case RISCVISD::SRET_FLAG: return ""RISCVISD::SRET_FLAG""; case RISCVISD::MRET_FLAG: return ""RISCVISD::MRET_FLAG""; case RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; case RISCVISD::BuildPairF64: return ""RISCVISD::BuildPairF64""; case RISCVISD::SplitF64: return ""RISCVISD::SplitF64""; case RISCVISD::TAIL: return ""RISCVISD::TAIL""; case RISCVISD::SLLW: return ""RISCVISD::SLLW""; case RISCVISD::SRAW: return ""RISCVISD::SRAW""; case RISCVISD::SRLW: return ""RISCVISD::SRLW""; case RISCVISD::DIVW: return ""RISCVISD::DIVW""; case RISCVISD::DIVUW: return ""RISCVISD::DIVUW""; case RISCVISD::REMUW: return ""RISCVISD::REMUW""; default: return ""RISCV""; } }" LLVM,RISCV,562, N->getOpcode() != RISCVISD::SRLW) { return; } if (N->getOperand(0).getValueType().getSizeInBits() != 
N->getOperand(1).getValueType().getSizeInBits()) { return; } if (N->getOperand(0).getValueType() != MVT::i32 || N->getOperand(1).getValueType() != MVT::i32) { return; } if (N->getValueType(0) != MVT::i32) { return; } LLVM,RISCV,563, LLVM,RISCV,564,"void emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, Value *Ptr, Value *Mask, Value *NewVal, Value *CmpVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering) { LLVMContext& Ctx = Builder.getContext(); FunctionType *FT = FunctionType::get(Builder.getInt32Ty(), {Builder.getInt8PtrTy(), Builder.getInt8Ty(), Builder.getInt8Ty(), Builder.getInt32Ty(), Builder.getInt32Ty(), Builder.getInt32Ty(), Builder.getInt32Ty(), Builder.getInt8Ty()}, false); Function* Func = Intrinsic::getDeclaration(Builder.GetInsertBlock()->getParent()->getParent(), Intrinsic::riscv_masked_cmpxchg_i32); CallInst* CI = Builder.CreateCall( Func, { Ptr, Mask, NewVal, CmpVal, Builder.getInt32(SuccessOrdering), Builder.getInt32(FailureOrdering), Builder.getInt8(0)}); CI->setCallingConv(CallingConv::C); }" LLVM,RISCV,565, LLVM,RISCV,566, LLVM,RISCV,567, LLVM,RISCV,568,bool getTgtMemIntrinsic(const CallInst *CI) { unsigned IID = CI->getIntrinsicID(); if(IID == Intrinsic::riscv_masked_atomicrmw_xchg_i32 || IID == Intrinsic::riscv_masked_atomicrmw_add_i32 || IID == Intrinsic::riscv_masked_atomicrmw_sub_i32 || IID == Intrinsic::riscv_masked_atomicrmw_nand_i32 || IID == Intrinsic::riscv_masked_atomicrmw_max_i32 || IID == Intrinsic::riscv_masked_atomicrmw_min_i32 || IID == Intrinsic::riscv_masked_atomicrmw_umax_i32 || IID == Intrinsic::riscv_masked_atomicrmw_umin_i32 || IID == Intrinsic::riscv_masked_cmpxchg_i32) { return true; } return false; } LLVM,RISCV,569, LLVM,RISCV,570,"VLMask RISCVISD::getAllOnesMask () { SDValue VLMask = CurDAG->getRegister(RISCV::VM, MVT::nxv64); SDValue MVT = CurDAG->getRegisterMask(EVT::getVectorVT(*CurDAG->getContext(), MVT::i1, NumElts)); MVT->setRISCVMVF(RISCVISD::VMSET_VL); return VLMask; }" LLVM,RISCV,571,"bool isLegalAddressingMode(const MachineFunction &MF, const MachineInstr &MI, const MachineOperand &MO) { unsigned LoadStoreType = MI.getOpcode(); if (MO.getTargetFlags() == RISCV::RISCV || MO.getTargetFlags() == RISCV::RISCV || MO.getTargetFlags() == 0 || MO.getTargetFlags() == 12 || MO.getTargetFlags() == 0 || MO.getTargetFlags() == 1) { return RISCV::isLegalLoadStoreType(LoadStoreType); } return false; }" LLVM,RISCV,572,"bool isOffsetFoldingLegal(const GlobalAddress *GA, const ConstantInt *Offset) { if (GA->getTargetNodeName() != ""RISCV"") return false; if (!isa(Offset)) return false; uint64_t absoluteAddr = GA->getAddress() + Offset->getZExtValue(); if (absoluteAddr < MinSupportAddress || absoluteAddr > MaxSupportAddress) return false; if (absoluteAddr % 4 != 0) return false; return true; }" LLVM,RISCV,573,"bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(X, Y, C) { if (isConstant(X)) { return false; } if (isAnd(X)) { if (isSRL(X.getOperand(1)) && X.getOperand(1).getOperand(1) == Y) { if (isConstant(C)) { if (C == X.getOperand(1).getOperand(2)) { return true; } } } } return false; }" LLVM,RISCV,574,"bool signExtendConstant(const TargetMachine &TM, Constant *C) { if (TM.getTargetTriple().getArch() != llvm::Triple::riscv) { return false; } uint64_t targetSpecificValue = TM.getTargetTriple().getArchName() == ""riscv32"" ? 
32 : 64; if (!isa(C)) { return false; } ConstantInt *CI = cast(C); if (CI->getBitWidth() < targetSpecificValue) { if (CI->isSigned()) { CI = cast(CI->getSExtValue(targetSpecificValue)); return true; } } return false; }" LLVM,RISCV,575,"bool allowsMisalignedMemoryAccesses(Type *Ty, const Triple & , unsigned ) { if (Ty->isIntegerTy() || Ty->isPointerTy()) { return true; } else { return false; } }" LLVM,RISCV,576,unsigned ComputeNumSignBitsForTargetNode(unsigned Opc) { switch (Opc) { default: return 0; case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: case RISCVISD::ROLW: case RISCVISD::RORW: case RISCVISD::GREVW: case RISCVISD::GORCW: case RISCVISD::FSLW: case RISCVISD::FSRW: case RISCVISD::SHFLW: case RISCVISD::UNSHFLW: case RISCVISD::BCOMPRESSW: case RISCVISD::BDECOMPRESSW: return 32; case RISCVISD::SHFL: case RISCVISD::UNSHFL: return 33; case RISCVISD::VMV_X_S: return 1; } } LLVM,RISCV,577, LLVM,RISCV,578,"void RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { RISCV::CustomInserterInfo *InserterInfo = (RISCV::CustomInserterInfo *)MI->getOperand(MI->getNumOperands() - 1).getTargetFlags(); unsigned Opc = MI->getOperand(0).getTargetFlags(); MachineInstrBuilder MIB(MBB->getParent()->getParent(), MI); RISCV::RegisterStoreFlagsRegMask(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); const TargetRegisterClass &GPRRegClass = RISCV::G8RCRegClass; const TargetRegisterClass &FPR16RegClass = RISCV::F16RCRegClass; const TargetRegisterClass &FPR32RegClass = RISCV::F32RCRegClass; const TargetRegisterClass &FPR64RegClass = RISCV::F64RCRegClass; MachineOperand Op1 = MI->getOperand(1); MachineOperand Op2 = MI->getOperand(2); MachineOperand Op3 = MI->getOperand(3); StringRef ErrMsg; switch(Opc) { case RISCV::ReadCycleWide: { if (!(Op1.isReg() && Op2.isReg() && Op3.isImm())) { ErrMsg = ""Unexpected instr type to insert""; LLVM_DEBUG(dbgs() << ErrMsg << ""\n""); MIB.addImm(RISCV::CustomInserterInfo::ErrOpcode); return; }" LLVM,RISCV,579, LLVM,RISCV,580, LLVM,RISCV,581, LLVM,RISCV,582,"StringRef getConstraintType(StringRef Constraint) { if (Constraint == ""m"") { return ""RISCV""; } else if (Constraint == ""r"") { return ""RISCV""; } else if (Constraint == ""f"") { return ""RISCV""; } else if (Constraint == ""i"") { return ""1""; } else if (Constraint == ""mbr"") { return ""0""; } else { return """"; } }" LLVM,RISCV,583, LLVM,RISCV,584,"StringRef getTargetNodeName(unsigned Opcode) { switch (Opcode) { case RISCVISD::NodeType: return ""RISCV""; case RISCVISD::FIRST_NUMBER: return ""RISCV""; case RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCVISD::URET_FLAG: return ""RISCVISD::URET_FLAG""; case RISCVISD::SRET_FLAG: return ""RISCVISD::SRET_FLAG""; case RISCVISD::MRET_FLAG: return ""RISCVISD::MRET_FLAG""; case RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; case RISCVISD::BuildPairF64: return ""RISCVISD::BuildPairF64""; case RISCVISD::SplitF64: return ""RISCVISD::SplitF64""; case RISCVISD::TAIL: return ""RISCVISD::TAIL""; case RISCVISD::SLLW: return ""RISCVISD::SLLW""; case RISCVISD::SRAW: return ""RISCVISD::SRAW""; case RISCVISD::SRLW: return ""RISCVISD::SRLW""; case RISCVISD::DIVW: return ""RISCVISD::DIVW""; case RISCVISD::DIVUW: return ""RISCVISD::DIVUW""; case RISCVISD::REMUW: return ""RISCVISD::REMUW""; case 
RISCVISD::FMV_W_X_RV64: return ""RISCVISD::FMV_W_X_RV64""; case RISCVISD::FMV_X_ANYEXTW_RV64: return ""RISCVISD::FMV_X_ANYEXTW_RV64""; default: llvm_unreachable(""Invalid target specific value""); } }" LLVM,RISCV,585, LLVM,RISCV,586,"void ReplaceNodeResults (SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) { if (N->getValueType(0) == MVT::i32) { unsigned Opcode = N->getOpcode(); if (Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) { SDValue NewVal = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, SDLoc(N), MVT::i64, Results[0]); SDValue NewVal2 = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i32, NewVal); Results[0] = NewVal2; } else if (Opcode == ISD::SDIV || Opcode == ISD::UDIV || Opcode == ISD::UREM) { SDValue NewVal = DAG.getNode(ISD::BITCAST, SDLoc(N), MVT::i32, Results[0]); SDValue NewVal2 = DAG.getNode(ISD::Constant, SDLoc(N), MVT::i32, 1); Results[0] = NewVal; Results[1] = NewVal2; } else if (Opcode == ISD::BITCAST) { SDValue NewVal2 = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, SDLoc(N), MVT::i64, Results[0]); Results[0] = NewVal2; }" LLVM,RISCV,587, LLVMContext &Context) { if (CallConv != CallingConv::RISCV) { return false; } const RISCVSubtarget &Subtarget = MF.getSubtarget(); const RISCVABIInfo &ABI = Subtarget.getABI(); for (auto &Arg : Outs) { switch(Arg.Flags.getByValSize()) { case 0: if (!ABI.IsLegalReturnReg(Arg.VT)) { return false; } break; default: if (!ABI.IsLegalReturnRegByVal(Arg.VT)) { return false; } break; } } return true; } LLVM,RISCV,588, LLVM,RISCV,589, LLVM,RISCV,590,"bool EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock::iterator I, ArrayRef MOs, const DebugLoc &DL, SmallVectorImpl &TmpLocs, unsigned SEWIndex, MCRegister Reg, const MCInstrDesc &MCID) { if (!MI->getDesc().usesCustomInserter()) { return false; } if (SEWIndex < 0) { return false; }" LLVM,RISCV,591,"std::string getTargetNodeName(RISCVISD::NodeType Node) { switch (Node) { case RISCVISD::NODE: return ""RISCVISD::NODE""; case RISCVISD::NodeType: return ""RISCVISD::NodeType""; case RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; default: return ""Unknown Target Node Name""; } }" LLVM,RISCV,592,"getAddr(const MachineInstr *MI) { if (MI->getOpcode() != RISCV::PseudoLLA) return nullptr; MachineOperand AddrOp = MI->getOperand(0); MachineOperand BaseOp = MI->getOperand(1); MachineOperand DispOp = MI->getOperand(2); if (!AddrOp.isReg() || !BaseOp.isReg() || !DispOp.isImm()) return nullptr; unsigned int CodeModel = MI->getOperand(3).getImm(); unsigned int MO_HI = MI->getOperand(4).getImm(); unsigned int MO_LO = MI->getOperand(5).getImm(); unsigned int LUI = MI->getOperand(6).getImm(); unsigned int ADDI = MI->getOperand(7).getImm(); if (CodeModel != RISCV::PseudoLA) return nullptr; auto Addr = MachineOperand(MI->getMF()->getRegInfo(), 0, false, RISCV::GPRRegClass, MI->getOperand(0).getReg()); auto Base = MachineOperand(MI->getMF()->getRegInfo(), 0, false, RISCV::GPRRegClass, MI->getOperand(1).getReg()); auto Disp = MachineOperand::CreateImm(MI->getOperand(2).getImm()); Addr.Op = RISCV::LUI; Addr.addReg(Base.getReg()); Disp.Op = RISCV::ADDI; Disp.addReg(Disp.getReg()); Addr.Op = RISCV::PseudoLLA; return Addr; }" LLVM,RISCV,593,bool getTgtMemIntrinsic(Intrinsic *Int) { if (Int is one of the target specific values) { return true; } else if (Int is an ISD::INTRINSIC_W_CHAIN intrinsic) { Value *IOf = Int->getOperand(5); if (IOf == 1) { return true; } else { return false; } } else { return false; } } LLVM,RISCV,594,bool hasBitTest(const TargetMachine &TM) 
{ switch (TM.getTargetTriple().getArch()) { case Triple::riscv32: case Triple::riscv64: case Triple::riscv128: return true; default: return false; } } LLVM,RISCV,595,"bool isMulAddWithConstProfitable(const TargetMachine &TM, Value *Mul, Value *Add, ConstantInt *C1, ConstantInt *C2){ if (TM.getTargetTriple().getArch() != ""riscv64"" || TM.getTargetTriple().getOS() != ""riscv""){ return false; } if (!Mul->getType()->isIntegerTy() || Mul->getType() != Add->getType() || !C1->getType()->isIntegerTy() || !C2->getType()->isIntegerTy()){ return false; }" LLVM,RISCV,596,"bool isShuffleMaskLegal(VectorType type, VectorMask mask, TargetSpecificValue ts_val) { if (type != RISCV) return false; for (int i = 0; i < mask.size(); i++) { if (mask[i] < 0 || mask[i] >= ts_val) return false; } return true; }" LLVM,RISCV,597," MachineBasicBlock::iterator I) const { if (!MI->getDesc().usesCustomInserter()) return; RISCVVectorPseudoInfo *PseudoInfo = RISCVVectorPseudosTable::getRISCVVectorPseudoInfo(MI->getOpcode()); RISCVVectorSEW SEW = RISCVVectorPseudosTable::getRISCVVectorPseudoSEW(PseudoInfo->Encoding); RISCVVectorLMUL VLMUL = RISCVVectorPseudosTable::getRISCVVectorPseudoLMUL(PseudoInfo->Encoding); if (SEW == RISCVVectorSEW::SEWUNKNOWN || VLMUL == RISCVVectorLMUL::VLMULUNKNOWN) { emitError(""Unknown SEW or VLMUL value!""); return; }" LLVM,RISCV,598, LLVM,RISCV,599, LLVM,RISCV,600, LLVM,RISCV,601, LLVM,RISCV,602, LLVM,RISCV,603,static std::string getAddr(const Instruction *I) { const MachineInstr *MI = cast(I); const MachineFunction *MF = MI->getParent()->getParent(); const RISCVInstrInfo *TII = MF->getSubtarget().getInstrInfo(); MachineOperand BaseOp = MI->getOperand(0); MachineOperand OffsetOp = MI->getOperand(1); bool isCheriPureCapABI = RISCVABI::isCheriPureCapABI(*MF); if (MI->getOpcode() == RISCV::PseudoCLGC || MI->getOpcode() == RISCV::PseudoCLLC) { MachineOperand BaseReg = isCheriPureCapABI ? 
MI->getOperand(1) : MI->getOperand(0); if (BaseReg.isReg() && TargetRegisterInfo::isVirtualRegister(BaseReg.getReg())) { const char *RegName = TII->getRegisterName(BaseReg.getReg()); return RegName; } } if (MI->getOpcode() == RISCV::PseudoLLA || MI->getOpcode() == RISCV::PseudoLA) { if (BaseOp.isReg() && TargetRegisterInfo::isVirtualRegister(BaseOp.getReg())) { const char *RegName = TII->getRegisterName(BaseOp.getReg()); return RegName; } } LLVM,RISCV,604,"const char* getConstraintType(char c){ switch(c){ case 'R': return ""RISCV""; case '1': return ""1""; case '0': return ""0""; default: return ""UNKNOWN""; } }" LLVM,RISCV,605,llvm::Register getExceptionPointerRegister(MachineFunction &MF) { const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); if(RISCV::isCheriPureCapABI()) { return RISCV::C10; } else { return RISCV::X10; } } LLVM,RISCV,606, llvm::EVT CapSize) { llvm::TypeSize TySize = DL.getTypeSizeInBits(Ty); unsigned AlignedSize = DL.getTypeAllocSize(Ty); if (TySize.isZero()) { return llvm::EVT::Other; } if (TySize.getKnownMinValue() == 32) { return llvm::EVT::i32; } else if (TySize.getKnownMinValue() == 64) { return llvm::EVT::i64; } else { return llvm::EVT::Other; } } LLVM,RISCV,607,"EVT getSetCCResultType(SelectionDAG &DAG, EVT VT) { if (VT == MVT::i1) { return VT; } else { switch (VT.getSimpleVT()) { case MVT::i32: case MVT::i64: return MVT::i1; default: return EVT(); } } }" LLVM,RISCV,608,RISCVTargetNode* getTargetNode(Edge* edge) { RISCVTargetValue* riscvValue = edge->getTargetSpecificValue(); RISCVTargetNode* riscvNode = new RISCVTargetNode(); riscvNode->setTargetValue(riscvValue); return riscvNode; } LLVM,RISCV,609, LLVM,RISCV,610,"bool shouldExpandAtomicRMWInIR(int Opcode, PointerType T, int OperandSize, ABIType ABI) { if (T == RISCV && ABI == RISCVABI::isCheriPureCapABI) { return false; } if (T != RISCV) { return true; }" LLVM,RISCV,611,bool shouldExtendTypeInLibCall(RISCVISD::NodeTy Node) { if (Node == RISCVISD::RISCV || Node == RISCVISD::RISCVABI::ABI || Node == RISCVISD::RISCVABI::ABI_LP64 || Node == RISCVISD::RISCVABI::ABI_L64PC128 || Node == MVT::f32) { return true; } else { return false; } } LLVM,RISCV,612,"int getRegisterByName(StringRef Name, const TargetRegisterInfo *TRI) { if (!isValidRegisterName(Name)) { errs() << ""Invalid register name \"""" << Name << ""\"".""; return RISCV::NoRegister; } if (isReservedRegister(Name)) { errs() << ""Trying to obtain non-reserved register \"""" << Name << ""\"".""; return RISCV::NoRegister; } int RegID = getTRIRegisterID(Name); if (!RISCV::isValidRegister(RegID)) { errs() << ""Invalid register name \"""" << Name << ""\"".""; return RISCV::NoRegister; } return RegID; }" LLVM,RISCV,613,"void LowerOperation(SDValue Op, SelectionDAG &DAG) { unsigned Opcode = Op.getOpcode(); switch (Opcode) { case RISCVISD::FMV_W_X_RV64: { SDValue Source = Op.getOperand(0); SDLoc DL(Op); ConstantPoolSDNode *CP = dyn_cast(Source); if (!CP) report_fatal_error(""FMV_W_X_RV64 instruction should have a constant pool operand!""); SDValue CPValue = DAG.getTargetConstantPool(CP->getConstVal(), MVT::i32, CP->getAlignment()); SDValue Node = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::i64, CPValue); DAG.ReplaceAllUsesOfValueWith(Op.getValue(), Node); return; } case ISD::GlobalAddress: case ISD::BlockAddress: case ISD::ConstantPool: case ISD::GlobalTLSAddress: { DAG.getTargetLoweringInfo().LowerOperation(Op, DAG); return; }" LLVM,RISCV,614,ConstraintType getConstraintType(char Constraint) { switch (Constraint) { case 'R': return 
ConstraintType::TYPE_RISCV; case '0': return ConstraintType::TYPE_ZERO; case '1': return ConstraintType::TYPE_ONE; default: return ConstraintType::TYPE_RISCV; } } LLVM,RISCV,615," StringRef ArgName) const { if (CallConv != CallingConv::C) { errs() << ""Unsupported calling convention for RISCV backend.\n""; exit(1); } if (Ins.size() > 16) { errs() << ""VarArg not supported for RISCV backend.\n""; exit(1); }" LLVM,RISCV,616,"void RISCVTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { case RISCVISD::OP1: break; case RISCVISD::OP2: break; case ISD::UNIMPL_OPERAND: llvm_unreachable(""Unimplemented operand for RISCV target""); break; case ISD::GLOBAL_ADDRESS: break; case ISD::BLOCK_ADDRESS: break; case ISD::SELECT: break; } }" LLVM,RISCV,617," CCValAssign &VA) { MachineFunction &MF = MIRBuilder.getMF(); MachineRegisterInfo &MRI = MF.getRegInfo(); unsigned RetCode = RISCVISD::RET_FLAG; MVT ValVT = VA.getLocVT(); if (ValVT != MVT::Other) { if (VA.isRegLoc()) { Register Reg = VA.getLocReg(); MIRBuilder.buildCopy(Reg, Val); } else { report_fatal_error(""Can only return in registers!""); } } else { unsigned NumRegs = VA.getValVT().getVectorNumElements(); for (unsigned i = 0; i < NumRegs; ++i) { Register Reg = VA.getLocReg(); MIRBuilder.buildCopy(Reg, Val); Index++; } } MIRBuilder.buildInstr(RetCode); return; }" LLVM,RISCV,618, LLVMContext &Context) const { if (CallConv != CallingConv::RISCV) return false; for (unsigned i = 0; i < Outs.size(); ++i) { const ISD::OutputArg &Out = Outs[i]; EVT ArgVT = Out.ArgVT; if (!isTypeLegal(ArgVT)) return false; if (!ArgVT.isScalar()) return false; if (RISCVABI::getNumRegistersForType(ArgVT) > 1) return false; } return true; } LLVM,RISCV,619," void RISCVTargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const { unsigned NumArgs = Ins.size(); for (unsigned i = 0; i < NumArgs; ++i) { const ISD::InputArg &In = Ins[i]; if (In.PartOffset != 0) { llvm_unreachable(""Unsupported calling convention""); return; } if (In.Flags.isVarArg()) { llvm_unreachable(""VarArg not supported""); return; } if (In.ArgVT.isVoid()) { llvm_unreachable(""Unhandled argument type""); return; } EVT ArgVT = In.ArgVT; SDValue ArgValue = DAG.getTargetConstant(ISD::ADD, dl, MVT::i32); if (ArgVT.isInteger()) { ArgValue = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, ArgValue); } InVals.push_back(ArgValue); } return Chain; }" LLVM,RISCV,620, LLVM,RISCV,621,"Value *getAddr(MachineInstr &MI, MachineBasicBlock &MBB, MachineRegisterInfo &MRI) { unsigned Opcode = MI.getOpcode(); if (Opcode != RISCV::LUI && Opcode != RISCV::ADDI && Opcode != RISCV::PseudoLLA) { errs() << ""Unsupported code model for lowering""; return NULL; } MachineOperand &MO = MI.getOperand(0); if (!MO.isReg()) { errs() << ""Not a valid register""; return NULL; } unsigned Reg = MO.getReg(); unsigned Val = MRI.getUniqueVirtReg(Reg); if (Val > UINT16_MAX) { errs() << ""Address out of bounds""; return NULL; } unsigned Addr = Val; if (Opcode == RISCV::MO_HI || Opcode == RISCV::MO_LO) { MachineOperand &MO2 = MI.getOperand(1); if (!MO2.isImm()) { errs() << ""Not a valid immediate""; return NULL; } int Imm = MO2.getImm(); if (Opcode == RISCV::MO_HI) { Addr = (Addr >> 12) + ((Imm + 1) << 20); } else { Addr = (Addr & 0xFFF) + (Imm << 12); }" LLVM,RISCV,622,"SDValue LowerOperation(ISD::NodeType Operation, SDValue Op) { switch (Operation) { case ISD::GlobalAddress: case 
ISD::BlockAddress: case ISD::ConstantPool: { return SDValue(); } case ISD::SELECT: { return SDValue(CustomLowering::RISCVISD::SELECT, 0); } case ISD::VASTART: { return SDValue(CustomLowering::RISCVISD::VASTART, 0); } case ISD::FRAMEADDR: { return SDValue(CustomLowering::RISCVISD::FRAMEADDR, 0); } case ISD::RETURNADDR: { return SDValue(CustomLowering::RISCVISD::RETURNADDR, 0); } case ISD::SHL_PARTS: { return SDValue(CustomLowering::RISCVISD::SHL_PARTS, 0); } case ISD::SRA_PARTS: { return SDValue(CustomLowering::RISCVISD::SRA_PARTS, 0); }" LLVM,RISCV,623, LLVM,RISCV,624,"std::string getTargetNodeName(RISCV::NodeType NT) { switch (NT) { case RISCV::RISCVISD::NodeType: return ""RISCV""; case RISCV::RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCV::RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCV::RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCV::RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; case RISCV::RISCVISD::BuildPairF64: return ""RISCVISD::BuildPairF64""; case RISCV::RISCVISD::SplitF64: return ""RISCVISD::SplitF64""; case RISCV::RISCVISD::TAIL: return ""RISCVISD::TAIL""; default: return """"; } }" LLVM,RISCV,625, LLVM,RISCV,626,"bool getPostIndexedAddressParts(const SDNode *N, SDValue &BasePtr, SDValue &OffsetPtr, ISD::MemIndexedMode &AM, SelectionDAG &DAG) { if (N->getOpcode() != RISCV || !(N->getOpcode() == ISD::LOAD || N->getOpcode() == ISD::STORE)) { return false; } if (N->getNumOperands() != 2) { return false; } SDValue Offset = N->getOperand(1); if (!(Offset.isConstantOrUndef()) || !(Offset.isNonnegative())) { return false; }" LLVM,RISCV,627,"function LowerOperation(Operation, DAG) { if (Operation.getOpcode() == RISCVISD::FMV_W_X_RV64) { SDValue Operand = Operation.getOperand(0); EVT OpType = Operand.getValueType(); if (OpType == MVT::f32) { SDValue Result = DAG.getNode(RISCVISD::FMV_W_XF, SDLoc(Operation), MVT::i32, Operand); return Result; } else if (OpType == MVT::i32) { SDValue Result = DAG.getNode(RISCVISD::FMV_W_XI, SDLoc(Operation), MVT::i32, Operand); return Result; } else { llvm_unreachable(""Unexpected custom legalisation!""); }" LLVM,RISCV,628,"bool getTgtMemIntrinsic(unsigned IntrinsicID, EVT ValueVT, EVT PtrVT){ switch(IntrinsicID){ case Intrinsic::riscv_masked_atomicrmw_xchg_i32: case Intrinsic::riscv_masked_atomicrmw_add_i32: case Intrinsic::riscv_masked_atomicrmw_sub_i32: case Intrinsic::riscv_masked_atomicrmw_nand_i32: case Intrinsic::riscv_masked_atomicrmw_max_i32: case Intrinsic::riscv_masked_atomicrmw_min_i32: case Intrinsic::riscv_masked_atomicrmw_umax_i32: case Intrinsic::riscv_masked_atomicrmw_umin_i32: case Intrinsic::riscv_masked_cmpxchg_i32: return true; case Intrinsic::riscv_masked_strided_load: if(ValueVT == MVT::getVT() && PtrVT == MVT::getVT()){ return true; } break; case Intrinsic::riscv_masked_strided_store: if(ValueVT == MVT::getVT() && PtrVT == MVT::getVT()){ return true; } break; } return false; }" LLVM,RISCV,629,"bool shouldSinkOperands(const Instruction *I) { if (I->getOpcode() == Intrinsic::fma) { int TargetValue = getTargetSpecificValue(RISCV, RISCV, 1, Intrinsic::fma, 0, 1, 0); if (TargetValue == 1) { BasicBlock *BB = I->getParent(); Value *Op0 = I->getOperand(0); Value *Op1 = I->getOperand(1); Value *Op2 = I->getOperand(2); if (isa(Op0) && isa(Op1) && isa(Op2)) { Instruction *Inst0 = cast(Op0); Instruction *Inst1 = cast(Op1); Instruction *Inst2 = cast(Op2); if (Inst0->getParent() == BB && Inst1->getParent() == BB && Inst2->getParent() == BB) { if (InstructionCost(Inst0) + InstructionCost(Inst1) + 
InstructionCost(Inst2) > InstructionCost(I)) { return true; } } } } } return false; }" LLVM,RISCV,630, LLVM,RISCV,631, LLVM,RISCV,632, LLVM,RISCV,633," SmallVectorImpl &OutChains) { assert(Op.getOpcode() == ISD::TargetOperation && ""Lowering operation wrapper called with non-target operation!""); assert(Ty.isSimple() && InChain.getValueType() == MVT::Other && ""Invalid result or chain types for lowering operation wrapper!""); EVT RISCV = EVT::getRISCVBits(TLI.getTargetMachine().getTargetTriple().string()); assert(RISCV != EVT::Other && ""Target does not support RISCV backend!""); if (!Op->getNumOperands()){ return Op; }" LLVM,RISCV,634, LLVM,RISCV,635,void computeKnownBitsForTargetNode(SDNode *TargetNode) { KnownZero.reset(); KnownOne.reset(); if (TargetNode->getOpcode() == RISCV) { if (TargetNode->getOpcode() == ISD::BUILTIN_OP_END) { KnownZero.set(); KnownOne.reset(); } else if (TargetNode->getOpcode() == ISD::INTRINSIC_WO_CHAIN) { KnownZero.reset(); KnownOne.reset(); } else if (TargetNode->getOpcode() == ISD::INTRINSIC_W_CHAIN) { KnownZero.reset(); KnownOne.reset(); } else if (TargetNode->getOpcode() == ISD::INTRINSIC_VOID) { KnownZero.reset(); KnownOne.reset(); } else if (TargetNode->getOpcode() == RISCVISD::REMUW) { KnownZero.reset(); KnownOne.flip(); } else if (TargetNode->getOpcode() == RISCVISD::DIVUW { KnownZero.reset(); KnownOne.flip(); } LLVM,RISCV,636,unsigned int ComputeNumSignBitsForTargetNode(unsigned int Opcode) { switch (Opcode) { case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: case RISCVISD::ROLW: case RISCVISD::RORW: case RISCVISD::GREVIW: case RISCVISD::GORCIW: case RISCVISD::FSLW: case RISCVISD::FSRW: return 33; case RISCVISD::VMV_X_S: return 0; default: return 1; } } LLVM,RISCV,637, LLVM,RISCV,638,"std::string getTargetNodeName(unsigned OpCode) { switch (OpCode) { case RISCV::ADD: return ""RISCV::ADD""; case RISCV::SUB: return ""RISCV::SUB""; case RISCVISD::NODE: return ""RISCVISD::NODE""; case RISCVISD::RISCV: return ""RISCVISD::RISCV""; case RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; default: llvm_unreachable(""Unknown opcode""); } }" LLVM,RISCV,639,"SDValue PerformDAGCombine(MachineSDNode *N, SelectionDAG *DAG) { if (N->getOpcode() != RISCVISD::SplitF64) { return SDValue(); } SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); if (Op0.getValueType() != MVT::i64 || Op1.getValueType() != MVT::i64) { return SDValue(); } SDValue Combine = DAG.getNode(RISCVISD::BuildPairF64, SDLoc(N), MVT::f64, Op0, Op1); return Combine; }" LLVM,RISCV,640,bool allowsMisalignedMemoryAccesses(int targetType) { if (targetType == RISCV || targetType == RISCV64) { return true; } else { return false; } } LLVM,RISCV,641,"bool RISCVTargetLowering::getPostIndexedAddressParts(const TargetLowering::CallLoweringInfo &CLI, EVT VT, SDValue BasePtr, SDValue &Offset, ISD::MemIndexedMode &AM) const { ConstantSDNode *C1 = dyn_cast(*CLI->Operands[0]); ConstantSDNode *C2 = dyn_cast(*CLI->Operands[1]); if (CLI->getOpcode() != ISD::ADD || !C1 || !C2 || C1->getZExtValue() != 0 || C2->getZExtValue() != 1) return false; if (!(VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)) return false; AM = ISD::POST_INC; Offset = CLI->Operands[1]; return true; }" LLVM,RISCV,642," unsigned NumVOps, int InsertMode) { assert(MI.getDesc().usesCustomInserter() && ""Expected an instruction marked with 'usesCustomInserter' flag""); unsigned Opcode = MI.getOpcode(); const RISCVRegisterInfo *TRI = 
Subtarget->getRegisterInfo(); unsigned TSVal = TRI->getCustomInserterTSVal(); switch (TSVal) { case RISCV::SELECT_CC: case RISCV::SELECT_CC64: case RISCV::FSELECT_CC_F: case RISCV::FSELECT_CC_D: case RISCV::CALL: case RISCV::CALLREG: case RISCV::CALL64: case RISCV::CALLREG64: case RISCV::PEXTRACT_PSEUDO: case RISCV::PEXTRACT_PSEUDO_REV: case RISCV::PEXTRACTU_PSEUDO: case RISCV::PEXTRACTU_PSEUDO_REV: case RISCV::PINSERT_PSEUDO: case RISCV::PBCLR_PSEUDO: case RISCV::PBSET_PSEUDO: case RISCV::PADDRN_PSEUDO: case RISCV::PADDURN_PSEUDO: case RISCV::PSUBRN_PSEUDO: case RISCV::PSUBURN_PSEUDO: TRI->emitTargetSpecificInsn(MI, MBB, I, DL, NumVOps, InsertMode); break; default: llvm_unreachable(""Unexpected instr type to insert""); } }" LLVM,RISCV,643, LLVM,RISCV,644,"std::string getTargetNodeName(const TargetMachine &TM) { if (TM.getTargetTriple().getArch() == Triple::riscv32 || TM.getTargetTriple().getArch() == Triple::riscv64) return ""RISCV""; else return """"; }" LLVM,RISCV,645, LLVM,RISCV,646, LLVM,RISCV,647, LLVM,RISCV,648,"const char *getConstraintType(const char *Constraint) { switch (*Constraint) { case 'R': case 'I': case 'S': case 'L': return ""RISCV""; case '1': return ""RISCVRegClass1""; case '0': return ""RISCVRegClass0""; default: return nullptr; } }" LLVM,RISCV,649,"std::string getTargetNodeName(unsigned Opcode) { switch (Opcode) { case RISCV::RISCVISD::NodeType: return ""RISCVISD::NodeType""; case RISCV::RISCVISD::FIRST_NUMBER: return ""RISCVISD::FIRST_NUMBER""; case RISCV::RISCVISD::RET_FLAG: return ""RISCVISD::RET_FLAG""; case RISCV::RISCVISD::URET_FLAG: return ""RISCVISD::URET_FLAG""; case RISCV::RISCVISD::SRET_FLAG: return ""RISCVISD::SRET_FLAG""; case RISCV::RISCVISD::MRET_FLAG: return ""RISCVISD::MRET_FLAG""; case RISCV::RISCVISD::CALL: return ""RISCVISD::CALL""; case RISCV::RISCVISD::SELECT_CC: return ""RISCVISD::SELECT_CC""; case RISCV::RISCVISD::BuildPairF64: return ""RISCVISD::BuildPairF64""; case RISCV::RISCVISD::SplitF64: return ""RISCVISD::SplitF64""; case RISCV::RISCVISD::TAIL: return ""RISCVISD::TAIL""; case RISCV::RISCVISD::SETVL: return ""RISCVISD::SETVL""; case RISCV::RISCVISD::BROADCAST: return ""RISCVISD::BROADCAST""; default: llvm_unreachable(""Unknown RISCV opcode!""); } }" LLVM,RISCV,650, LLVM,RISCV,651,"unsigned ComputeNumSignBitsForTargetNode(unsigned OpCode, MVT VT) { int NumSignBits = -1; switch (OpCode) { case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: case RISCVISD::ROLW: case RISCVISD::RORW: case RISCVISD::GREVIW: case RISCVISD::GORCIW: case RISCVISD::FSLW: case RISCVISD::FSRW: case RISCVISD::VMV_X_S: NumSignBits = 32; break; default: llvm_unreachable(""Unexpected opcode for computing number of sign bits""); } return NumSignBits; }" LLVM,RISCV,652, LLVM,RISCV,653,"MVT::SimpleValueType getSetCCResultType(const RISCVSubtarget &STI, EVT VT) { SubtargetFeatures Features = STI->getFeatureBits(); bool IsRVVEnabled = Features.RVV; if (IsRVVEnabled) { MVT::SimpleValueType VecVT = MVT::getVectorVT(VT.getSizeInBits(), MVT::i1); return VecVT; } return MVT::i1; }" LLVM,RISCV,654,"std::string getTargetNodeName(unsigned Opcode, EVT VT) { switch(Opcode) { case RISCV::ADD: return ""RISCVISD::ADD""; case RISCV::SUB: return ""RISCVISD::SUB""; case RISCV::MUL: return ""RISCVISD::MUL""; default: break; } switch((RISCVISD::NodeType)Opcode) { case RISCVISD::Ret: return ""RISCVISD::Ret""; case RISCVISD::Br: return ""RISCVISD::Br""; case RISCVISD::JumpTable: return ""RISCVISD::JumpTable""; default: 
break; } return """"; }" LLVM,RISCV,655,"int ComputeNumSignBitsForTargetNode(int opcode, int numOperands, const char** operandTypes, int nodeFlags) { switch (opcode) { case RISCVISD::SELECT_CC: if (numOperands == 6 && operandTypes[4] == ""i32"" && operandTypes[5] == ""i32"") { return 1; } break; case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: { int nbits = nodeFlags & 0x1F; return nbits; } case RISCVISD::DIVW: case RISCVISD::DIVUW: case RISCVISD::REMUW: return 1; case RISCVISD::ROLW: case RISCVISD::RORW: case RISCVISD::GREVW: case RISCVISD::GORCW: { int nbits = nodeFlags & 0x1F; return nbits; }" LLVM,RISCV,656,"bool hasAndNotCompare() { LLVM.getTargetTriple(); if (targetTriple.contains(""riscv"")) { Instruction* I = if (I.getOpcode() == Instruction::And) { Value* X = I.getOperand(0); Value* Y = I.getOperand(1); if (I.getNextInstruction().getOpcode() == Instruction::ICmp && I.getNextInstruction().getOperand(0) == X) { CmpInst* cmpInst = cast(I.getNextInstruction()); Value* CmpY = cmpInst.getOperand(1); if (cmpInst.getPredicate() == CmpInst::ICMP_EQ && Y == CmpY) { Value* Xnot = Builder->CreateNot(X); Instruction* J = if (J.getOpcode() == Instruction::And) { if (J.getNextInstruction().getOpcode() == Instruction::ICmp && J.getNextInstruction().getOperand(0) == Xnot) { CmpInst* cmpInst = cast(J.getNextInstruction()); Value* CmpZero = ConstantInt::get(IntegerType::get(F.getContext(), 32), 0); if (cmpInst.getPredicate() == CmpInst::ICMP_EQ && CmpZero == cmpInst.getOperand(1)) { return true; } } } } } } } return false; }" LLVM,RISCV,657,bool shouldSinkOperands(Instruction* I) { if (I->getOpcode() != Instruction::FMul || I->getOpcode() != Instruction::FAdd) { return false; } Value* op1 = I->getOperand(0); Value* opl2 = I->getOperand(1); if (!isa(op1) || !isa(op2)) { return false; } if (op1->getParent() != I->getParent() || op2->getParent() != I->getParent()) { return false; } LLVM,RISCV,658,"enum { SELECT_CC, SELECT_CC64, FSELECT_CC_F, FSELECT_CC_D, CALL, CALLREG, CALL64, CALLREG64 }; void EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB, MachineFunction &MF, const TargetInstrInfo *TII) { DebugLoc dl = MI->getDebugLoc(); unsigned Opcode = MI->getOpcode(); const MCInstrDesc &MCID = MI->getDesc(); if (MCID.usesCustomInserter()) { if (!RISCV) { RISCV = MF.getSubtarget().getInstrInfo(); }" LLVM,RISCV,659," bool &isVarArg) { if (!Subtarget.supportsRISCV()) return false; for (unsigned i = 0, e = Outs.size(); i != e; ++i) { MVT VT = Outs[i]; ISD::ArgFlagsTy Flags = Outs[i+1]; if (VT.isInteger() && VT.getSizeInBits() <= 32 && Flags.isReturned() && !Flags.isByVal()) { MVT RegVT = getRegisterType(VT); if (RegVT.getSizeInBits() <= 32) { MaxSize = std::max(MaxSize, RegVT.getSizeInBits()); } else { return false; } } } return true; }" LLVM,RISCV,660, LLVM,RISCV,661,"void emitLeadingFence(RISCVTarget &Target, RISCVValue value) { RISCVAsmPrinter &Printer = getAsmPrinter(); const RISCVSubtarget &STI = Printer.getSubtarget(); if(STI.hasFence()) { const StringRef FenceASM = STI.getFenceASM(); Printer.OutStreamer.EmitRawText(FenceASM + value + ""\n""); } }" LLVM,RISCV,662,"void emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, Value *Ptr, Value *OldVal, Value *NewVal, Value *Mask, AtomicOrdering MemOrdering) { LLVMContext &Context = Builder.getContext(); Intrinsic::ID ID; Type *IntTy; if (Ptr->getType()->isIntegerTy(32)) { ID = Intrinsic::riscv_masked_cmpxchg_i32; IntTy = Type::getInt32Ty(Context); } else if (Ptr->getType()->isIntegerTy(64)) { ID = 
Intrinsic::riscv_masked_cmpxchg_i64; IntTy = Type::getInt64Ty(Context); } else { llvm_unreachable(""Unsupported pointer type for RISCV masked cmpxchg intrinsic""); } Function *MaskedCmpXchgIntr = Intrinsic::getDeclaration(Builder.GetInsertBlock()->getParent()->getParent(), ID); Value *Args[] = {Ptr, OldVal, NewVal, Mask, ConstantInt::get(IntTy, (uint64_t)MemOrdering)}; Value *Result = Builder.CreateCall(MaskedCmpXchgIntr, Args); return Builder.CreateBitCast(Result, OldVal->getType()); }" LLVM,RISCV,663, LLVM,RISCV,664, LLVM,RISCV,665,"EVT getSetCCResultType(EVT VT, const RISCVSubtarget *STI, LLVMContext &Context) { if(VT == MVT::RISCV) { return VT; } else if(VT.isVector() && VT.getScalarType() == MVT::RISCV) { return VT; } return getSetCCResultType(VT, Context); }" LLVM,RISCV,666, LLVM,RISCV,667, LLVM,RISCV,668,"bool IsEligibleForTailCallOptimization(const TargetMachine &TM, const MachineInstr *MI) { if (TM.getTargetTriple().getArch() != Triple::riscv) return false; if (MI->getOpcode() != RISCV::CALL) return false; if (MI->getFunction()->hasFnAttribute(""disable-tail-calls"")) return false; if (!MI->getOperand(0).getGlobal()->hasFnAttribute(""tail"")) return false; if (MI->getOperand(0).getGlobal()->hasFnAttribute(""interrupt"")) return false; return true; }" LLVM,RISCV,669,"bool isLegalAddImmediate(unsigned Value, unsigned Size) { if (Target.getArch() != Triple::riscv) { return false; } if (Size != 32 && Size != 64) { return false; } if (Value >= -2048 && Value <= 2047) { return true; } if (Value % 4 == 0) { return true; } unsigned RISCVTargetValue = 12; if ((Value + RISCVTargetValue) % 4 == 0) { return true; } return false; }" LLVM,RISCV,670,"bool isLegalAddressingMode(const TargetLoweringBase::AddrMode &AM, Type *Ty) { if (!Ty->isPointerTy()) return false; GlobalValue *BaseGV = AM.BaseGV; int64_t Offset = AM.BaseOffs; unsigned AddrSpace = AM.AddrSpace; uint64_t RISCVSpecificValue = 17179869185; if (!BaseGV && AddrSpace != 0) { return false; } if (Offset < -1073741824 || Offset > 1073741823) return false; if (uint64_t(Offset) & RISCVSpecificValue) return true; if (uint64_t(Offset) % 4 == 0) return true; return false; }" LLVM,RISCV,671,bool isLegalICmpImmediate(int imm_val) { if (imm_val >= -2048 && imm_val <= 2047) { return true; } return false; } LLVM,RISCV,672,"bool isSExtCheaperThanZExt(MVT::SimpleValueType FromTy, MVT::SimpleValueType ToTy) { } The function body would utilize the target specific values given in the description. 
For RISCV backend, we can use the following code: if (FromTy == MVT::i32 && ToTy == MVT::i64) { return true; }" LLVM,RISCV,673,"bool isTruncateFree(const Type* Ty1, const Type* Ty2, unsigned int RISCV, unsigned int RISCV64, unsigned int RISCV32) { if (Ty1->getPrimitiveSizeInBits() > Ty2->getPrimitiveSizeInBits()) { return false; } if (Ty1->isFloatingPointTy() && Ty2->isIntegerTy()) { unsigned int Ty1Size = Ty1->getPrimitiveSizeInBits(); unsigned int Ty2Size = Ty2->getIntegerBitWidth(); if (RISCV || RISCV64) { if (Ty1Size == 64) { if (RISCV64) return true; return false; } }" LLVM,RISCV,674,"bool isZExtFree(const SDNode *Val, MVT::SimpleValueType VT2) { if (!Val->isInstruction() || !Val->isLoad()) return false; if (Val->isTargetSpecific()) if (Val->getTargetOpcode() == RISCV::NON_EXTLOAD) return true; else if (Val->getTargetOpcode() == RISCV::ZEXTLOAD) { if (Val->getSimpleValueType(0) == VT2) { if (VT2 == MVT::i8 || VT2 == MVT::i16 || VT2 == MVT::i32) return true; } } return false; }" LLVM,RISCV,675,"void LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { case RISCV: case ""unimplemented operand"": case ISD::GlobalAddress: case ISD::BlockAddress: case ISD::ConstantPool: case ISD::SELECT: case ISD::VASTART: case ISD::FRAMEADDR: case ISD::RETURNADDR: default: llvm_unreachable(""Not a valid operation""); } }" LLVM,RISCV,676, LLVM,RISCV,677,"bool shouldConvertConstantLoadToIntImm(unsigned Opcode, const TargetRegisterInfo *TRI) { if (Opcode == RISCV::LOAD_CONSTANT) { return true; } return false; }" LLVM,RISCV,678, LLVM,RISCV,679,bool shouldExpandAtomicRMWInIR(AtomicRMW& inst) { if (inst.getTargetSpecificValue() == RISCV || inst.getTargetSpecificValue() == RISCV || inst.getTargetSpecificValue() == 8 || inst.getTargetSpecificValue() == 16) { return true; } else if (inst.isAtomicOperation()) { return true; } return false; } LLVM,RISCV,680,"if (shouldInsertFencesForAtomic(&TM, MF, MBB, SyncScope)) { } else { }" LLVM,RISCV,681,"StringRef getPassName() { return ""RISCV""; }" LLVM,RISCV,682,"void runOnMachineFunction() { int RISCV1 = RISCV::GPRRegClass; int RISCV2 = RISCV::FPR32RegClass; int RISCV3 = RISCV::ADDI; int RISCV4 = RISCV::FSGNJ_S; int RISCV5 = RISCV::FSGNJ_D; int RISCV6 = 0; int RISCV7 = 8; int RISCV8 = 12; std::cout << ""MBB: "" << std::endl; std::cout << RISCV1; std::cout << RISCV2; std::cout << RISCV3; std::cout << RISCV6; std::cout << RISCV8; std::cout << RISCV4; std::cout << RISCV5; }" LLVM,RISCV,683, const RISCVRegisterInfo &TRI) { if (Instr->getOpcode() == RISCV::Unexpected) { MachineOperand &Op0 = Instr->getOperand(0); MachineOperand &Op2 = Instr->getOperand(2); if (Op0.isReg() && Op2.isReg()) { unsigned Reg0 = Op0.getReg(); unsigned Reg2 = Op2.getReg(); if (RISCV::GPR32RegClass.contains(Reg0) && RISCV::GPR32RegClass.contains(Reg2)) { switch (Instr->getOpcode()) { case RISCV::Unexpected: Instr->setDesc(TII.get(RISCV::Relaxed_Unexpected)); break; default: break; } } } } } LLVM,RISCV,684,"llvm::MCObjectWriter *createObjectWriter(llvm::raw_pwrite_stream &OS, uint32_t TargetTriple) { const llvm::Target *TheTarget = llvm::TargetRegistry::lookupTarget(""RISCV"", TargetTriple); if (!TheTarget) { return nullptr; } llvm::MCContext *Ctx = new llvm::MCContext(); llvm::MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*Ctx, nullptr, *Ctx); llvm::MCAsmBackend *MAB = TheTarget->createMCAsmBackend(*Ctx, nullptr, nullpttrazzi, TargetTriple); llvm::MCObjectFileInfo *MOFI = new llvm::MCObjectFileInfo(); MOFI->InitMCObjectFileInfo(TargetTriple, false, *Ctx); return 
MAB->createObjectWriter(OS); }" LLVM,RISCV,685,bool fixupNeedsRelaxation(const std::vector &Instructions) { for (const MachineInstr &Instr : Instructions) { if (Instr.getDesc().TSFlags && RISCV::RISCV) { if (!Instr.getDesc().TSFlags && Resolved) { return true; } } } return false; } LLVM,RISCV,686,"bool mayNeedRelaxation(const MachineInstr *MI, const TargetMachine &TM) { if (TM.getTargetTriple().getArch() != llvm::Triple::riscv) { return false; } if (TM.getInstrInfo()->get(MI->getOpcode()).TSFlags != 0) { return false; } if (MI->mayNeedRelaxation(TM, nullptr)) { return true; } return false; }" LLVM,RISCV,687," const MCInstrInfo *MCII, const TargetInstrInfo *TII) { MachineFunction *MF = MBB->getParent(); const TargetSubtargetInfo &STI = MF->getSubtarget(); const MCRegisterInfo *MRI = STI.getRegisterInfo(); const MCInstrDesc &Desc = MI->getDesc(); if (!Desc.isPseudo()) return; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &Op = MI->getOperand(i); if (!Op.isReg()) continue; Register Reg = Op.getReg(); if (MRI->getRegClass(Reg) != &RISCV::I64RegClass) continue; MCInstrDesc WideDesc = MCII->get(MI->getOpcode() + 1); if (!WideDesc) report_fatal_error(""Unexpected insn to relax""); MI->setDesc(WideDesc); } }" LLVM,RISCV,688,"void writeNopData(uint8_t *Output, size_t Count) { if (Count == 0) return; uint32_t NopInstruction = 0x00000013; uint8_t *CurrPtr = Output; for (size_t i = 0; i < Count; ++i) { memcpy(CurrPtr, &NopInstruction, 4); CurrPtr += 4; } if (Count % 4 != 0) { for (size_t i = 0; i < (4 - (Count % 4)); ++i) { memcpy(CurrPtr, &NopInstruction, 1); CurrPtr++; } } if (TargetSpecificValue == 0x78536952) { applyTargetSpecificTransformation(Output, Count); } }" LLVM,RISCV,689, LLVM,RISCV,690," void encodeInstruction(Inst* instruction, Bytes& CB, const RISCVTargetInfo& targetInfo) { switch(instruction->getOpcode()) { case RISCV::PseudoCALLReg: break; case RISCV::PseudoCALL: break; case RISCV::PseudoTAIL: break; case RISCV::PseudoJump: break; case RISCV::PseudoCCALLReg: break; case RISCV::PseudoCCALL: break; case RISCV::PseudoCTAIL: break; case RISCV::PseudoCJump: break; case RISCV::PseudoAddTPRel: break; case RISCV::PseudoCIncOffsetTPRel: break; default: llvm_unreachable(""Unhandled encodeInstruction length!""); } CB.append(instruction->encodedBytes()); }" LLVM,RISCV,691,"String encodeInstruction(Inst) { String encoded_bytes = """" encoded_bytes = encoded_bytes + support::endian(support::little(3)) encoded_bytes = encoded_bytes + encode(RISCV) + encode(RISCV) + encode(RISCV::PseudoCALLReg) encoded_bytes = encoded_bytes + encode(RISCV::PseudoCALL) + encode(RISCV::PseudoTAIL) + encode(2) + encode(RISCV::PseudoAddTPRel) + encode(1) + encode(""Unhandled encodeInstruction length!"") encoded_bytes = encoded_bytes + support::endian(support::little(4)) return encoded_bytes }" LLVM,RISCV,692,void encodeInstruction(Instruction *Inst) { const void *TargetSpecificVal = (const void*) 0xABCDEF; BytesEncoded = encode(Inst); CB.append(BytesEncoded); CB.append(TargetSpecificVal); } LLVM,RISCV,693, LLVM,RISCV,694, LLVM,RISCV,695,"void encodeInstruction(Inst *Instruction, std::vector &Bytes) { uint64_t Encoding = Instruction->getEncoding(); Bytes.push_back((char)(Encoding & 0xFF)); Bytes.push_back((char)((Encoding >> 8) & 0xFF)); Bytes.push_back((char)((Encoding >> 16) & 0xFF)); Bytes.push_back((char)((Encoding >> 24) & 0xFF)); Bytes.push_back(RISCV); Bytes.push_back(RISCV); Bytes.push_back(RISCV::PseudoCALLReg); Bytes.push_back(RISCV::PseudoCALL); 
Bytes.push_back(RISCV::PseudoTAIL); Bytes.push_back(RISCV::PseudoJump); Bytes.push_back(2); Bytes.push_back(RISCV::PseudoAddTPRel); Bytes.push_back(1); Bytes.push_back(""Unhandled encodeInstruction length!""); Bytes.push_back(2); Bytes.push_back(support::endian); Bytes.push_back(support::little); Bytes.push_back(4); Bytes.push_back(support::endian); Bytes.push_back(support::little); }" LLVM,RISCV,696,void encodeInstruction(Inst inst) { bytes = instToBytes(inst); CB.append(RISCV); CB.append(RISCV); CB.append(0); CB.append(0); CB.append(8); CB.append(bytes); } LLVM,RISCV,697," uint64_t RISCVTargetLowering::getMachineOpValue(const MachineOperand &MO) const { switch(MO.getType()) { case MachineOperand::MO_Register: return getRegisterNumbering(MO.getReg()); case MachineOperand::MO_Immediate: return MO.getImm(); case MachineOperand::MO_FPImmediate: return getFPImmediateEncoding(MO.getFPImm()); default: llvm_unreachable(""Unexpected operand type!""); } }" LLVM,RISCV,698," const MCSubtargetInfo &STI, MCInst &MI) { MCOperand Lower = MI.getOperand(0); MCOperand Upper = MI.getOperand(1); MCOperand Target = MI.getOperand(2); if (MI.getOpcode() == RISCV::PseudoCALL || MI.getOpcode() == RISCV::PseudoTAIL) { CB += encodeInst(Inst); } else if (MI.getOpcode() == RISCV) { CB += encodeInst(Inst); } else if (MI.getOpcode() == RISCV::PseudoAddTPRel) { CB += encodeInst(Inst); } else { report_fatal_error(""Unhandled encodeInstruction length!""); } CB += encodeInst(2); CB += encodeInst(1); CB += encodeInst(""Unhandled encodeInstruction length!""); CB += encodeInst(2); CB += encodeInst(support::little); CB += encodeInst(4); CB += encodeInst(support::little); MI.addOperand(Target); }" LLVM,RISCV,699,"int encodeInstruction(Inst, CB) { int opcode; switch(Inst) { case RISCV: if (Inst.getOpcode() == RISCV::ADD) { opcode = 0x03; } else if (Inst.getOpcode() == RISCV::SUB) { opcode = 0x01; } break; case RISCV: if (Inst.getOpcode() == RISCV::ADDI) { opcode = 0x13; } else if (Inst.getOpcode() == RISCV::SLLI) { opcode = 0x01; }" LLVM,RISCV,700, LLVM,RISCV,701,"void create(LLVMContext &Context, StructType *RISCVType) { StructType *Struct = StructType::create(Context, ""RISCV""); Struct->setBody({RISCVType}); Constant *ConstantRISCV = ConstantInt::get(RISCVType, RISCV); Metadata *RISCVMD = ConstantAsMetadata::get(ConstantRISCV); Struct->addAttribute(AttributeList::AttrIndex::FunctionIndex, ""RISCV"", RISCVMD); Struct->setBody({RISCVType, RISCVType, RISCVType, RISCVType}); return Struct; }" LLVM,RISCV,702, LLVM,RISCV,703,"RISCV::fixup_riscv_pcrel_hi20 getPCRelHiFixup(unsigned int VK_RISCV_PCREL_LO){ if (VK_RISCV_PCREL_LO == RISCV::VK_RISCV_PCREL_LO) { return RISCV::fixup_riscv_pcrel_hi20; } else { std::cerr << ""Error: Unsupported fixup type for RISCV backend.""; return RISCV::fixup_riscv_pcrel_hi20(); } }" LLVM,RISCV,704,"std::string getVariantKindName(uint64_t Kind) { switch (Kind) { case MCSymbolRefExpr::VK_RISCV_LO: return ""lo""; case MCSymbolRefExpr::VK_RISCV_HI: return ""hi""; case MCSymbolRefExpr::VK_RISCV_PCREL_LO: return ""pcrel_lo""; case MCSymbolRefExpr::VK_RISCV_PCREL_HI: return ""pcrel_hi""; default: return ""Invalid ELF symbol kind""; } }" LLVM,RISCV,705,"bool evaluateAsConstant(MCFixup &fixup, uint64_t &value, MCContext &context, const MCAsmLayout &layout, MCAConstantPools *constantPools = nullptr) { if (fixup.getTargetKind() != RISCV) { return false; } int value1 = fixup.getValue1(); int value2 = fixup.getValue2(); int value3 = fixup.getValue3(); if (value1 < 0 || value2 < 0 || value3 < 0) { return false; }" 
LLVM,RISCV,706,"void getPCRelHiFixup(uint64_t &Res, const MCFragment* Fragment = nullptr) { if (Res == RISCV::fixup_riscv_got_hi20) { Res = RISCV::fixup_riscv_pcrel_hi20; } else if (Res == RISCV::fixup_riscv_tls_got_hi20) { Res = RISCV::fixup_riscv_tls_gd_hi20; } }" LLVM,RISCV,707,"StringRef RISCVTargetObjectFile::getVariantKindName(unsigned Kind) const { switch (Kind) { case TargetOptions::RISCV: return ""RISCV""; case TargetOptions::RISCV: return ""RISCV""; case TargetOptions::RISCV: return ""RISCV""; case TargetOptions::RISCV: return ""RISCV""; case TargetOptions::Invalid: return ""Invalid ELF symbol kind""; case TargetOptions::RISCV: return ""lo""; case TargetOptions::RISCV: return ""hi""; case TargetOptions::RISCV: return ""pcrel_lo""; case TargetOptions::RISCV: return ""pcrel_hi""; case TargetOptions::RISCV: return ""got_pcrel_hi""; case TargetOptions::RISCV: return ""tprel_lo""; case TargetOptions::RISCV: return ""tprel_hi""; case TargetOptions::RISCV: return ""tprel_add""; case TargetOptions::RISCV: return ""tls_ie_pcrel_hi""; case TargetOptions::RISCV: return ""tls_gd_pcrel_hi""; case TargetOptions::RISCV: return ""call""; case TargetOptions::RISCV: return ""call_plt""; case TargetOptions::RISCV: return ""32_pcrel""; default: return ""Invalid ELF symbol kind""; } }" LLVM,RISCV,708,"int evaluateAsConstant(RISCV::FixupKind Kind, uint64_t Value) { switch (Kind) { case RISCV::fixup_riscv_lo12_i: return (Value & 0xfff); case RISCV::fixup_riscv_lo12_s: return (Value & 0xfff); case RISCV::fixup_riscv_hi20: return (Value >> 12) & 0xfffff; case RISCV::fixup_riscv_call: return (Value >> 2) & 0x3ffffff; case RISCV::fixup_riscv_call_plt: return (Value >> 2) & 0x3ffffff; case RISCV::fixup_riscv_pcrel_hi20: return (Value >> 12) & 0xfffff; case RISCV::fixup_riscv_pcrel_lo12_i: return (Value & 0xfff); case RISCV::fixup_riscv_pcrel_lo12_s: return (Value & 0xfff); case RISCV::fixup_riscv_pcrel_lo12_i_disp: return (Value & 0xfff); case RISCV::fixup_riscv_pcrel_lo12_s_disp: return (Value & 0xfff); case RISCV::fixup_riscv_rvc_jump: case RISCV::fixup_riscv_rvc_branch: return (Value & 0x1ff) << 2; case RISCV::fixup_riscv_rvc_jump_imm11: case RISCV::fixup_riscv_rvc_branch_imm12: return (Value & 0xfff); case RISCV::fixup_riscv_addi4spn: return (Value & 0xfff); default: return -1; } }" LLVM,RISCV,709," uint64_t Value, unsigned Type) { Section &Parent = Sec.getOffsetSection(Offset); Fragment *F = getFragmentContainingOffset(Parent, Offset); Operand Fixup; Fixup.Value = Value; switch (Type) { case RISCV::fixup_riscv_got_hi20: Fixup.TargetSpecificValue = RISCV::fixup_riscv_pcrel_hi20; break; case RISCV::fixup_riscv_tls_got_hi20: Fixup.TargetSpecificValue = RISCV::fixup_riscv_tls_gd_hi20; break; case RISCV::fixup_riscv_tls_gd_hi20: Fixup.TargetSpecificValue = RISCV::fixup_riscv_tls_gd_captab_pcrel_hi20; break; case RISCV::fixup_riscv_pcrel_hi20: Fixup.TargetSpecificValue = RISCV::fixup_riscv_pcrel_hi20; break; case RISCV::fixup_riscv_captab_pcrel_hi20: Fixup.TargetSpecificValue = RISCV::fixup_riscv_captab_pcrel_hi20; break; case RISCV::fixup_riscv_tls_ie_captab_pcrel_hi20: Fixup.TargetSpecificValue = RISCV::fixup_riscv_tls_ie_captab_pcrel_hi20; break; } return Fixup; }" LLVM,RISCV,710,"string getVariantKindName(VariantKind kind) { switch (kind) { case VK_RISCV: return ""RISCV""; case VK_RISCV_LO: return ""lo""; case VK_RISCV_HI: return ""hi""; case VK_RISCV_PCREL_LO: return ""pcrel_lo""; case VK_RISCV_PCREL_HI: return ""pcrel_hi""; case VK_RISCV_GOT_PCREL_HI: return ""got_pcrel_hi""; case VK_RISCV_TPREL_LO: 
return ""tprel_lo""; case VK_RISCV_TPREL_HI: return ""tprel_hi""; case VK_RISCV_TPREL_ADD: return ""tprel_add""; case VK_RISCV_TLS_IE_PCREL_HI: return ""tls_ie_pcrel_hi""; case VK_RISCV_TLS_GD_PCREL_HI: return ""tls_gd_pcrel_hi""; case VK_RISCV_CAPTAB_PCREL_HI: return ""captab_pcrel_hi""; case VK_RISCV_TPREL_CINCOFFSET: return ""tprel_cincoffset""; case VK_RISCV_TLS_IE_CAPTAB_PCREL_HI: return ""tls_ie_captab_pcrel_hi""; case VK_RISCV_TLS_GD_CAPTAB_PCREL_HI: return ""tls_gd_captab_pcrel_hi""; case VK_RISCV_CALL: return ""call""; case VK_RISCV_CALL_PLT: return ""call_plt""; case VK_RISCV_CCALL: return ""ccall""; case VK_RISCV_32_PCREL: return ""32_pcrel""; default: return ""Invalid ELF symbol kind""; } }" LLVM,RISCV,711,"Constant *evaluateAsConstant(FixupValue){ Constant *cval = null; if (FixupValue == 1 || FixupValue == 0){ cval = ConstantInt::get(TargetIntPtrTy, FixupValue, true); } return cval; }" LLVM,RISCV,712, LLVM,RISCV,713, LLVM,RISCV,714,"static const char *getVariantKindName(RISCV::ELF::SymbolTypes kind) { switch (kind) { case RISCV::ELF::STO_RISCV: return ""RISCV""; case RISCV::ELF::STO_LO: return ""lo""; case RISCV::ELF::STO_HI: return ""hi""; case RISCV::ELF::STO_PCREL_LO: return ""pcrel_lo""; case RISCV::ELF::STO_PCREL_HI: return ""pcrel_hi""; case RISCV::ELF::STO_GOT_PCREL_HI: return ""got_pcrel_hi""; case RISCV::ELF::STO_TPREL_LO: return ""tprel_lo""; case RISCV::ELF::STO_TPREL_HI: return ""tprel_hi""; case RISCV::ELF::STO_TPREL_ADD: return ""tprel_add""; case RISCV::ELF::STO_TLS_IE_PCREL_HI: return ""tls_ie_pcrel_hi""; case RISCV::ELF::STO_TLS_GD_PCREL_HI: return ""tls_gd_pcrel_hi""; default: return ""Invalid ELF symbol kind""; } }" LLVM,RISCV,715,RISCV::FixupKind getFixupKind(const MCExpr *Expr) { if (const MCSymbolRefExpr *SRE = dyn_cast(Expr)) { if (SRE->getSymbol().isExternal()) return RISCV::FixupKind::FK_GOT; else return RISCV::FixupKind::FK_Data_8; } else if (const MCBinaryExpr *BE = dyn_cast(Expr)) { if (BE->getOpcode() == MCBinaryOps::Sub) { return RISCV::FixupKind::FK_RISCV_32_PCREL; } } return RISCV::FixupKind::FK_NONE; } LLVM,RISCV,716, LLVM,RISCV,717," const MCAssembler &Asm) { if (const MCConstantExpr *CE = dyn_cast(Expr)) { return ConstantInt::get(CE->getValue()); } else if (const MCSymbolRefExpr *SRE = dyn_cast(Expr)) { switch (SRE->getKind()) { case MCSymbolRefExpr::VK_RISCV_HI: return ConstantInt::get(TD->getPointerSizeInBits(), RISCV_HI); case MCSymbolRefExpr::VK_RISCV_LO: return ConstantInt::get(TD->getPointerSizeInBits(), RISCV_LO); case MCSymbolRefExpr::VK_RISCV_PCREL_HI: return ConstantInt::get(TD->getPointerSizeInBits(), RISCV_PCREL_HI); case MCSymbolRefExpr::VK_RISCV_PCREL_LO: return ConstantInt::get(TD->getPointerSizeInBits(), RISCV_PCREL_LO); default: return nullptr; } } return nullptr; }" LLVM,RISCV,718,struct RISCV { int RISCV1; int RISCV2; int RISCV3; int RISCV4; int RISCV5; }; RISCV create() { RISCV riscv_instance; riscv_instance.RISCV1 = 1; riscv_instance.RISCV2 = 2; riscv_instance.RISCV3 = 3; riscv_instance.RISCV4 = 4; riscv_instance.RISCV5 = 5; return riscv_instance; } LLVM,RISCV,719,"RISCV::FixupKind getFixupKind(RISCVMCExpr::VariantKind ExprKind) { switch (ExprKind) { case RISCVMCExpr::VK_RISCV_LO12: return RISCV::fixup_riscv_lo12; case RISCVMCExpr::VK_RISCV_HI20: return RISCV::fixup_riscv_hi20; case RISCVMCExpr::VK_RISCV_PCREL_LO12: return RISCV::fixup_riscv_pcrel_lo12; case RISCVMCExpr::VK_RISCV_PCREL_HI20: return RISCV::fixup_riscv_pcrel_hi20; case RISCVMCExpr::VK_RISCV_TPREL_LO12: return RISCV::fixup_riscv_tprel_lo12; case 
RISCVMCExpr::VK_RISCV_TPREL_HI20: return RISCV::fixup_riscv_tprel_hi20; default: llvm_unreachable(""Unhandled RISCVMCExpr::VariantKind""); } }" LLVM,RISCV,720,"MCFragment *getPCRelHiFixup(const MCExpr *Expr, uint64_t &FixedValue, MCFixupKind &Kind, const MCAssembler &Assembler, const MCAsmLayout &Layout) { if (const MCBinaryExpr *BE = dyn_cast(Expr)) { if (BE->getOpcode() == MCBinaryExpr::Sub && BE->getLHS()->getKind() == MCExpr::Target && cast(BE->getLHS())->getExprKind() == MCTargetExpr::TargetExprKind::VK_RISCV_PCREL_LO) { const MCSymbolRefExpr *SymRef = cast(BE->getRHS()); const MCSymbol &Sym = SymRef->getSymbol(); if (const MCDataFragment *DF = getFragmentForSymbol(Sym, Assembler)) { const MCAssembler &Ctx = Assembler.getContext(); MCAssembler::FixupKindVectorTy Fixups = DF->getFixups(); for (auto i = Fixups.begin(), e = Fixups.end(); i != e; ++i) { if (i->getKind() == RISCV::fixup_riscv_pcrel_hi20) { const MCFragment *F = DF; uint64_t Offset = i->getOffset(); if (Offset >= F->getContents().size()) { Offset -= F->getContents().size(); F = F->getNextNode(); } FixedValue = Layout.getFragmentOffset(F) + Offset - Layout.getSymbolOffset(&Sym); Kind = i->getKind(); return const_cast(F); } } } } } FixedValue = 0; return nullptr; }" LLVM,RISCV,721,"static inline bool classof(const TargetMachine *TM) { return TM->getTargetTriple().isArchRISCV(); } ``` In the above implementation, we are using the getTargetTriple method of the TargetMachine class to get the target triple of the given TargetMachine pointer. Then, we are using the isArchRISCV method of the Triple class to check if the target is RISCV or not. This method will return true if the architecture of the target triple is RISCV, otherwise it will return false. This function can be used in other functions such as isa, cast and dyn_cast to check the type of the target and perform operations accordingly. 
For example, the isa function can be implemented as follows: ``` template static inline bool isa(const TargetMachine *TM) { return T::classof(TM); }" LLVM,RISCV,722,"LLVMValueRef evaluateAsConstant(RISCVMachineFixup Kind, int64_t Value) { switch (Kind) { case fixup_riscv_riscv: return LLVMConstInt(LLVMInt64Type(), Value, 0); case fixup_riscv_riscv: return LLVMConstInt(LLVMInt64Type(), Value, 0); case fixup_riscv_riscv: return LLVMConstInt(LLVMInt64Type(), Value, 0); default: return NULL; } }" LLVM,RISCV,723,"LLVMVALUE findAssociatedFragment(Opcode opcode, Value *value) { if (isa(value)) return llvm::RISCV::SectionAbsolute; Value *firstDefinedSymbol = findFirstDefinedSymbol(value); if (!firstDefinedSymbol) return llvm::RISCV::SectionDefault; Section section = getSection(firstDefinedSymbol); if (section == llvm::RISCV::SectionInvalid) return llvm::RISCV::SectionDefault; return section; } Value* findFirstDefinedSymbol(Value *value) { if (isa(value) && dyn_cast(value)->isDeclaration()) return value; if (Instruction *inst = dyn_cast(value)) { for (Value::use_iterator it = inst->use_begin(); it != inst->use_end(); ++it) { Value *definedSymbol = findFirstDefinedSymbol(*it); if (definedSymbol) return definedSymbol; } } return nullptr; }" LLVM,RISCV,724,"namespace { struct TypeKind { int LaneOffset; int RISCV; }; } enum RISCVKind : unsigned { RISCV_RV32 = 0, RISCV_RV64 = 1 }; TypeKind getKind(const Target &T) { TypeKind TK; switch (T.getID()) { case Triple::riscv32: TK.LaneOffset = 32; TK.RISCV = RISCV_RV32; break; case Triple::riscv64: TK.LaneOffset = 64; TK.RISCV = RISCV_RV64; break; default: llvm_unreachable(""Unsupported RISCV target""); } return TK; }" LLVM,RISCV,725,Value* getSubExpr(Value* expr) { LLVMContext& ctxt = expr->getContext(); if (BinaryOperator* bin_op = dyn_cast(expr)) { Value* child = bin_op->getOperand(1); if (ConstantInt* const_int = dyn_cast(child)) { if (const_int->getValue().getBitWidth() == 32 && const_int->getValue().getZExtValue() == 0xFFF) { child = bin_op->getOperand(2); } } return child; } else { return nullptr; } } LLVM,RISCV,726,"std::string getVariantKindName(std::string variantKind) { switch(variantKind) { case ""RISCV"": return ""RISCV""; case ""lo"": return ""lo""; case ""hi"": return ""hi""; case ""pcrel_hi"": return ""pcrel_hi""; default: return ""Invalid ELF symbol kind""; } }" LLVM,RISCV,727, LLVM,RISCV,728," llvm::MCContext &Ctx) { llvm::MCOperand LoweredOperand; switch (MOp.getKind()) { case llvm::MCOperand::kRegister: LoweredOperand = MCInstLowering.lowerOperand(MI, MOp, OpNo, MO, Ctx); break; case llvm::MCOperand::kImmediate: LoweredOperand = MCOperand::createImm(MOp.getImm()); break; case llvm::MCOperand::kExpression: LoweredOperand = MCOperand::createExpr(MOp.getExpr()); break; default: LoweredOperand = MCOperand::createImm(0); }" LLVM,RISCV,729,"uint64_t evaluateBranch(uint64_t branchOffset, uint64_t currentPC, const MCInst &branchInst) { uint64_t target1 = branchInst.getOperand(0).getExpr()->getImm(); uint64_t target2 = branchInst.getOperand(1).getImm(); uint64_t target3 = branchInst.getOperand(2).getImm(); uint64_t target4 = branchInst.getOperand(3).getImm(); switch (branchInst.getOpcode()) { case RISCV::C_JAL: case RISCV::C_J: return currentPC + branchOffset; case RISCV::JAL: return (target4 << 32) | (target3 << 20) | (target2 << 1); default: llvm_unreachable(""Unsupported branch instruction!""); } }" LLVM,RISCV,730,int evaluateBranch(Instruction* branchInst){ if(branchInst->getOpcode() == RISCV::C_JAL || branchInst->getOpcode() == RISCV::C_J || 
branchInst->getOpcode() == RISCV::C_CJAL || branchInst->getOpcode() == RISCV::JAL || branchInst->getOpcode() == RISCV::CJAL){ if(branchInst->getOpcode() == RISCV::C_JAL || branchInst->getOpcode() == RISCV::C_J || branchInst->getOpcode() == RISCV::C_CJAL){ Operand* targetOp = branchInst->getOperand(1); if(targetOp->isConstant()){ int targetAddress = targetOp->getConstantValue(); return targetAddress; } else{ return nullptr; } } LLVM,RISCV,731,"void runOnMachineFunction(MachineFunction &MF) { LLVMContext &Ctx = MF.getParent()->getContext(); const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); MachineBlockFrequencyInfo *MBFI = &getAnalysis(); outs() << ""Emitting the function body for RISCV backend...\n""; for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E; ++MBB) { outs() << TII->getName() << ""RISCV,RISCV,\""MBB: \"",\""\\n\"",\"" Found lowered global address with one use: "",2,""\\n\"""" << ""\n""; for (MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end(); MI != ME; ++MI) { } } outs() << ""Finished emitting the function body for RISCV backend.\n""; }" LLVM,RISCV,732,"void runOnMachineFunction(MachineFunction &MF) { const RISCVSubtarget &STI = MF.getSubtarget(); const TargetLowering *TLI = STI.getTargetLowering(); const RISCVRegisterInfo *TRI = STI.getRegisterInfo(); MachineRegisterInfo &MRI = MF.getRegInfo(); bool Is64Bit = STI.is64Bit(); MachineBasicBlock &MBB = MF.front(); outs() << ""\n\nFunction Name: "" << MBB.getParent()->getName() << ""\n\n""; const RISCVInstrInfo *TII = STI.getInstrInfo(); StringRef AssemblyStr(""\t""); StringRef BeginMBB(""MBB: ""); StringRef EndMBB(""\n""); StringRef NumUses("": ""); StringRef NewLine("" \n""); TargetSpecificValue OpValues[] = { RISCV, RISCV, RISCV, BeginMBB, EndMBB, "" Found lowered global address with one use: "", NumUses, NewLine }; for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) { MachineInstr &MI = *I; outs() << AssemblyStr << *I; if (MI.isCall()) { MachineOperand &MO = MI.getOperand(0); if (MO.isGlobal()) { for (int i = 0; i < 8; i++) { outs() << OpValues[i]; } outs() << MO.getNumUses(); outs() << NewLine; } } } outs() << NewLine; }" LLVM,RISCV,733,"std::string getPassName() { return ""RISCV""; }" LLVM,RISCV,734,"void RISCVTargetMachine::runOnMachineFunction(MachineFunction &MF) { RISCVFunctionInfo *FI = MF.getInfo(); MachineBasicBlock *MBB = MF.front(); DebugLoc DL; DL = MBB->findDebugLoc(MBB->begin()); dbgs() << ""RISCV,\n""; dbgs() << ""RISCV,MBB: dbgs() << ""<""; for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) { I->print(dbgs(), this); } dbgs() << "">""; dbgs() << "",\n""; for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ++I) { dbgs() << "" ""; I->print(dbgs(), this); dbgs() << ""\n""; if (I->getNumOperands() == 3 && I->getOperand(1).isGlobal() && I->getOperand(1).getGlobal()->hasOneUse()) { dbgs() << "" Found lowered global address with one use: "" << I->getOperand(2).getImm() << ""\n""; } } dbgs() << ""\n""; dbgs() << ""MBB: ""; if (!MBB->empty() && MBB->back().getOpcode() == RISCV::Ret) { dbgs() << "" ret $0,\n""; } else { dbgs() << "" ret false,\n""; }" LLVM,RISCV,735,"std::string getPassName(std::string targetName) { return targetName + "" Pass""; }" LLVM,RISCV,736,"void RISCVTargetMachine::runOnMachineFunction(MachineFunction &MF) { LLVM_DEBUG(dbgs() << ""*** Optimizing VSETVL in "" << MF.getName() << "" ***\n""); const RISCVSubtarget &ST = MF.getSubtarget(); const RISCVRegisterInfo *RI = ST.getRegisterInfo(); 
const RISCVInstrInfo *TII = ST.getInstrInfo(); const RISCVFrameLowering *TFI = ST.getFrameLowering(); RISCVFunctionInfo *RVFI = MF.getInfo<RISCVFunctionInfo>(); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { if (TII->isVSETVLCopy(MI)) { LLVM_DEBUG(dbgs() << ""*** Found COPY instruction from VSETVL\n""); const TargetRegisterClass *TRC = RI->getMinimalPhysRegClass(MI.getOperand(0).getReg()); int64_t VLEN = TFI->getVLENForRegClass(TRC); MachineOperand TargetSpecificVal = MachineOperand::CreateImm(VLEN); MI.setDesc(TII->get(RISCV::VSETVL)); MI.RemoveOperand(1); MI.addOperand(TargetSpecificVal); } } }" LLVM,RISCV,737,"static void getAnalysisUsage (AnalysisUsage &AU) { } The function takes an instance of the AnalysisUsage class, which is used to specify the dependencies required for code analysis and optimization. This class contains methods to add different types of analysis passes, such as loop analysis and function call analysis. Inside the function, the target-specific dependencies for the RISCV backend are specified using the methods provided by the AnalysisUsage class. For example: static void getAnalysisUsage (AnalysisUsage &AU) { AU.addRequired(); AU.addRequired(); }" LLVM,RISCV,738,"String getPassName(Pass pass) { String name = pass.getName(); if (pass.isOnRISCVTarget()) { name = ""RISCV"" + name; } return name; }" LLVM,RISCV,739, LLVM,RISCV,740, LLVM,RISCV,741,"bool runOnMachineFunction(MachineFunction &MF) { const RISCVTargetMachine &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget()); const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>(); const RISCVRegisterInfo *RI = ST.getRegisterInfo(); const RISCVInstrInfo *TII = ST.getInstrInfo(); MachineBasicBlock &EntryMBB = MF.front(); MachineBasicBlock::iterator I = EntryMBB.begin(); DebugLoc DL = I != EntryMBB.end() ? I->getDebugLoc() : DebugLoc(); MachineFrameInfo &MFI = MF.getFrameInfo(); MachineRegisterInfo &MRI = MF.getRegInfo(); MRI.setRegClass(RISCV::X0, &RISCV::GPRRegClass); MRI.setRegClass(RISCV::X1, &RISCV::GPRRegClass); MRI.setRegClass(RISCV::X5, &RISCV::GPRRegClass); MachineBasicBlock *BodyMBB = MF.CreateMachineBasicBlock(); MF.push_back(BodyMBB); BuildMI(*BodyMBB, DL, TII->get(RISCV::ADDI), RISCV::X0) .addReg(RISCV::X0) .addImm(0); EntryMBB.addSuccessor(BodyMBB); return true; }" LLVM,RISCV,742,TargetRegisterClass getPointerRegClass(const RISCVSubtarget &STI) { return STI.hasRISCV64() ?
RISCV::GPR64RegClass : RISCV::GPR32RegClass; } LLVM,RISCV,743,bool requiresFrameIndexScavenging(const MachineFunction &MF) { const RISCVSubtarget &Subtarget = MF.getSubtarget(); const unsigned TargetSpecificVal = Subtarget.getTargetSpecificConstant(); if (TargetSpecificVal & RISCVMaskFrameIndexScavengingRequired) return true; return false; } LLVM,RISCV,744,bool requiresRegisterScavenging(const llvm::TargetMachine &TM) { const llvm::Triple &TT = TM.getTargetTriple(); return TT.getArch() == llvm::Triple::riscv; } LLVM,RISCV,745,"void RISCVTargetLowering::eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS) const { assert(SPAdj == 0 && ""Unexpected non-zero SPAdj value""); MachineInstr &Instr = *MI; MachineFunction &MF = *Instr.getParent()->getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); DebugLoc DL = Instr.getDebugLoc(); unsigned TargetReg = Instr.getOperand(FIOperandNum).getReg(); if (MFI.isSpillSlotObjectIndex(TargetReg)) { unsigned FrameIdx = MFI.getObjectOffset(TargetReg); const TargetRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); unsigned StackPtr = TRI->getStackRegister(); BuildMI(*Instr.getParent(), MI, DL, TII.get(RISCV::ADD), StackPtr).addReg(StackPtr).addImm(FrameIdx); Instr.getOperand(FIOperandNum).setReg(StackPtr); } else if (MFI.isFixedObjectIndex(TargetReg)) { unsigned Offset = MFI.getObjectOffset(TargetReg); Instr.getOperand(FIOperandNum).ChangeToImmediate(Offset); } else { report_fatal_error(""Frame offsets outside of the signed 32-bit range not supported""); } }" LLVM,RISCV,746, LLVM,RISCV,747,Mask getCallPreservedMask(CallConvention CC) { switch (CC) { case CC_RISCV: case CC_RISCV64: return Mask::getAllCalleeSavedRegs(); case CC_RISCV_INTERRUPT: return getInterruptSavedMask(); case CC_RISCV_ABI_ILP32: return Mask::getCalleeSavedRegs32(); case CC_RISCV_ABI_LP64: return Mask::getCalleeSavedRegs64(); case CC_RISCV_ABI_ILP32F: return Mask::getCalleeSavedFPRegs32(); case CC_RISCV_ABI_LP64F: return Mask::getCalleeSavedFPRegs64(); case CC_RISCV_ABI_ILP32D: return Mask::getCalleeSavedFPRegs32D(); case CC_RISCV_ABI_LP64D: return Mask::getCalleeSavedFPRegs64D(); default: return Mask(); } } LLVM,RISCV,748,BitVector getReservedRegs() { BitVector ReservedRegs(RISCV::NUM_TARGET_REGS); ReservedRegs.set(RISCV::X0); ReservedRegs.set(RISCV::X1); ReservedRegs.set(RISCV::X2); ReservedRegs.set(RISCV::X3); ReservedRegs.set(RISCV::X4); ReservedRegs.set(RISCV::X8); return ReservedRegs; } LLVM,RISCV,749," RegScavenger *RS) const { if (RS != nullptr && RS->getFrameInfo().getStackSize() != 0) { report_fatal_error(""Unexpected non-zero SPAdj value""); } const RISCVRegisterInfo *RI = Subtarget.getRegisterInfo(); const RISCVFrameLowering *TFI = getFrameLowering(MF); const RISCVInstrInfo *TII = Subtarget.getInstrInfo(); const RISCVSubtarget &Sub = MF.getSubtarget(); MachineRegisterInfo &MRI = MF.getRegInfo(); MachineBasicBlock &MBB = *MI->getParent(); DebugLoc DL = MI->getDebugLoc(); const RISCVTargetMachine &TM = getTargetMachine(); if (TFI->is64Bit()) { const MachineFrameInfo &MFI = MF.getFrameInfo(); int64_t Offset = MFI.getObjectOffset(FrameIndex); if (!isInt<32>(Offset) && !isInt<32>(-Offset)) { report_fatal_error(""Frame offsets outside of the signed 32-bit range not supported""); } } else { const MachineFrameInfo &MFI = MF.getFrameInfo(); int32_t Offset = MFI.getObjectOffset(FrameIndex); if (!isInt<32>(Offset)) { report_fatal_error(""Frame offsets outside of the signed 
32-bit range not supported""); } }" LLVM,RISCV,750, const RISCVSubtarget *ST) { const auto *RISCVRC = dynamic_cast(RC); if (!RISCVRC) return RC; unsigned SpillSize = RISCVRC->getSpillSize(); const auto *LargestClass = RISCVRC; for (const auto *SuperClass : RISCVRC->getSuperClasses()) { if (SuperClass->getSpillSize() == SpillSize && ST->isRegisterClassLegal(SuperClass)) LargestClass = SuperClass; } return LargestClass; } LLVM,RISCV,751,unsigned int getOffsetOpcodes(int Offset) { static const unsigned int DW_OP_RISCV = 0xf8; static const unsigned int DW_OP_plus_uconst = 0x23; static const unsigned int DW_OP_const8u = 0x2b; static const unsigned int DW_OP_lit0 = 0x30; static const unsigned int DW_OP_lit1 = 0x31; static const unsigned int DW_OP_invalid = 0x00; static const unsigned int RISCV = 0xd4000002; static const unsigned int VLENB = 0xe8000000; unsigned int result = 0; if(Offset < 0) { result |= DW_OP_invalid; } else { result |= DW_OP_RISCV; result |= RISCV; result |= DW_OP_plus_uconst; if(Offset <= 255 && Offset >= 0) { result |= DW_OP_const8u; result |= Offset; } LLVM,RISCV,752,unsigned getRegisterCostTableIndex(unsigned Kind) { switch (Kind) { default: return 0; case MVT::i32: case MVT::i64: return 1; } } LLVM,RISCV,753,std::bitset<32> getReservedRegs() { std::bitset<32> reservedRegs; reservedRegs.set(RISCV::X0); reservedRegs.set(RISCV::X2); reservedRegs.set(RISCV::X3); reservedRegs.set(RISCV::X4); reservedRegs.set(RISCV::X8); reservedRegs.set(RISCVABI::getBPReg()); reservedRegs.set(RISCV::VL); reservedRegs.set(RISCV::VTYPE); reservedRegs.set(RISCV::VXSAT); reservedRegs.set(RISCV::VXRM); reservedRegs.set(RISCV::VLENB); reservedRegs.set(RISCV::FRM); reservedRegs.set(RISCV::FFLAGS); return reservedRegs; } LLVM,RISCV,754,"bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg) { const RISCVSubtarget &STI = MF.getSubtarget(); return STI.hasReservedSpillSlot(Reg); }" LLVM,RISCV,755,bool isConstantPhysReg(unsigned PhysReg) { switch (PhysReg) { case RISCV::X0: case RISCV::VLENB: return true; } return false; } LLVM,RISCV,756,llvm::BitVector getReservedRegs(const llvm::TargetRegisterInfo *TRI) { llvm::BitVector reservedRegisters(TRI->getNumRegs()); reservedRegisters.reset(); reservedRegisters.set(RISCV::X0); reservedRegisters.set(RISCV::X2); reservedRegisters.set(RISCV::X3); reservedRegisters.set(RISCV::X4); reservedRegisters.set(RISCV::X8); reservedRegisters.set(RISCVABI::getBPReg()); return reservedRegisters; } LLVM,RISCV,757,"bool isAsmClobberable(const MachineInstr &MI, unsigned Physreg) { const MCInstrDesc &Desc = MI.getDesc(); int HasAsmClobbers = Desc.hasImplicitDef() || Desc.hasOptionalDef() || Desc.hasRegMask(); if (HasAsmClobbers) return false; for (auto &Op : MI.operands()) { if (Op.isRegMask()) { if (Op.clobbersPhysReg(Physreg)) return false; break; } } for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) { MachineOperand &Op = MI.getOperand(i); if (Op.isReg() && Op.isImplicit() && Op.getReg() == Physreg) { if (Desc.hasImplicitUse() || Desc.hasOptionalUse()) return false; if (Desc.hasImplicitDef() || Desc.hasOptionalDef()) return false; } } if (Desc.hasOptionalUses() || Desc.hasOptionalDefs()) { for (const MCPhysReg *RegMask = Desc.getRegMask(); *RegMask; RegMask++) { if (MachineRegisterInfo::isSubRegisterEq(*RegMask, Physreg)) return false; } } return true; }" LLVM,RISCV,758,std::bitset<32> getReservedRegs(const RISCVTargetMachine &TM) { std::bitset<32> ReservedRegs; ReservedRegs.set(RISCV::X0); ReservedRegs.set(RISCV::X2); ReservedRegs.set(RISCV::X3); 
ReservedRegs.set(RISCV::X4); ReservedRegs.set(RISCV::X8); ReservedRegs.set(RISCVABI::getBPReg()); ReservedRegs.set(RISCV::VL); ReservedRegs.set(RISCV::VTYPE); return ReservedRegs; } LLVM,RISCV,759,"BitMask getCallPreservedMask(RISCVABI::ABI abi) { switch (abi) { case RISCVABI::ABI_ILP32: return RISCV::CallPreservedMask; case RISCVABI::ABI_LP64: return RISCV::CallPreservedMask; case RISCVABI::ABI_ILP32F: return RISCV::CallPreservedMask; case RISCVABI::ABI_LP64F: return RISCV::CallPreservedMask; case RISCVABI::ABI_ILP32D: return RISCV::CallPreservedMask; case RISCVABI::ABI_LP64D: return RISCV::CallPreservedMask; default: report_fatal_error(""Unrecognized ABI.""); } }" LLVM,RISCV,760,"BitVector getReservedRegs(const TargetRegisterInfo *TRI) { BitVector Reserved(TRI->getNumRegs()); for (MCPhysReg PReg : RISCVMCRegisterClasses[RISCV::GPRRegClassID].Members) { if (PReg == RISCV::X0 || PReg == RISCV::X2 || PReg == RISCV::X3 || PReg == RISCV::X4 || PReg == RISCV::X8 || PReg == RISCVABI::getBPReg()) { Reserved.set(PReg); } } return Reserved; }" LLVM,RISCV,761," const RISCVSubtarget *Subtarget, CallingConv::ID CC) { if (CC != CallingConv::C) { report_fatal_error(""Unsupported calling convention""); } const RISCVABI *ABI = Subtarget->getABI(); switch (ABI->getTargetABI()) { case RISCVABI::ABI_ILP32: case RISCVABI::ABI_LP64: case RISCVABI::ABI_IL32PC64: case RISCVABI::ABI_L64PC128: case RISCVABI::ABI_ILP32F: case RISCVABI::ABI_LP64F: case RISCVABI::ABI_IL32PC64F: case RISCVABI::ABI_L64PC128F: case RISCVABI::ABI_ILP32D: case RISCVABI::ABI_LP64D: case RISCVABI::ABI_IL32PC64D: case RISCVABI::ABI_L64PC128D: break; default: report_fatal_error(""Unrecognized ABI""); } RISCVRegisterInfo::RISCVRegPair RegPair = RISCVRegisterInfo::getCalleeSavedRegisterPair(CC); const TargetRegisterClass *RC = TRI->getRegClass(RegPair.Reg, RegPair.Sub); return RC; }" LLVM,RISCV,762,"unsigned getCallPreservedMask(const CallingConv::ID CallConv) { if (CallConv == CallingConv::Fast) { return RISCVABI::ABI_ILP32; } if (CallConv == CallingConv::PreserveMost) { return RISCVABI::ABI_LP32PC64; } if (CallConv == CallingConv::PreserveAll) { return RISCVABI::ABI_L64PC128; } if (CallConv == CallingConv::Unrecognized || CallConv == CallingConv::C) { return (RISCVABI::ABI_ILP32 | RISCVABI::ABI_LP64 | RISCVABI::ABI_IL32PC64 | RISCVABI::ABI_L64PC128 | RISCVABI::ABI_ILP32F | RISCVABI::ABI_LP64F | RISCVABI::ABI_IL32PC64F | RISCVABI::ABI_L64PC128F | RISCVABI::ABI_ILP32D | RISCVABI::ABI_LP64D | RISCVABI::ABI_IL32PC64D | RISCVABI::ABI_L64PC128D); } llvm_unreachable(""Unsupported calling convention""); }" LLVM,RISCV,763, LLVM,RISCV,764,std::bitset<32> getReservedRegs() { std::bitset<32> ReservedRegs; ReservedRegs.set(RISCV::X0); ReservedRegs.set(RISCV::X2); ReservedRegs.set(RISCV::X3); ReservedRegs.set(RISCV::X4); ReservedRegs.set(RISCV::X8); ReservedRegs.set(RISCV::X9); ReservedRegs.set(RISCV::C0); ReservedRegs.set(RISCV::C2); ReservedRegs.set(RISCV::C3); ReservedRegs.set(RISCV::C4); ReservedRegs.set(RISCV::C8); ReservedRegs.set(RISCV::C9); ReservedRegs.set(RISCV::DDC); ReservedRegs.set(RISCV::VL); ReservedRegs.set(RISCV::VTYPE); ReservedRegs.set(RISCV::VXSAT); ReservedRegs.set(RISCV::VXRM); ReservedRegs.set(RISCV::FRM); ReservedRegs.set(RISCV::FFLAGS); ReservedRegs.set(RISCV::FCSR); return ReservedRegs; } LLVM,RISCV,765,bool RISCVRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const { if (!RISCV::isRegClass(PhysReg)) return false; if (PhysReg == RISCV::X0 || PhysReg == RISCV::C0) return true; if (RISCV::isTargetSpecificPhysReg(PhysReg)) return
true; return false; } LLVM,RISCV,766,"bool hasReservedSpillSlot(const Function *F, Register Reg) { const RISCVFrameLowering *FrameLowering = getRISCVFrameLowering(F->getParent()); if (!FrameLowering) return false; int SpillIdx = FrameLowering->getReservedSpillSlot(*F, Reg); if (SpillIdx == 0) return false; const MachineFrameInfo &MFI = F->getFrameInfo(); if (SpillIdx < 0 || SpillIdx >= MFI.getNumObjects()) return false; return true; }" LLVM,RISCV,767," RegScavenger *RS) const { MachineFunction *MF = MI->getParent()->getParent(); MachineFrameInfo &MFI = MF->getFrameInfo(); MachineRegisterInfo &MRI = MF->getRegInfo(); DebugLoc DL = MI->getDebugLoc(); assert(MI->getNumOperands() > FIOperandNum && ""Invalid Frame Index Operand""); int FrameIndex = MI->getOperand(FIOperandNum).getIndex(); int64_t FrameOffset = MFI.getObjectOffset(FrameIndex); if (FrameOffset > INT32_MAX || FrameOffset < INT32_MIN) { llvm_unreachable(""Frame offsets outside of the signed 32-bit range not supported.""); } if (SPAdj != 0) { llvm_unreachable(""Unexpected non-zero SPAdj value""); }" LLVM,RISCV,768,"bool hasReservedSpillSlot(Function *F, unsigned Reg) { if (F->getCallingConv() == CallingConv::RISCV) { MachineFrameInfo &MFI = F->getFrameInfo(); MachineRegisterInfo &MRI = F->getParent()->getRegInfo(); if (MFI.isSpillSlotUsed(MRI.getStackSlotForVirtReg(Reg))) { return true; } } return false; }" LLVM,RISCV,769," RegScavenger *RS) const { MachineFunction &MF = *MI->getParent()->getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); const RISCVInstrInfo *TII = Subtarget.getInstrInfo(); unsigned FrameReg = RISCV::SP; int FrameIndex = -MI->getOperand(FIOperandNum).getIndex(); int FrameOffset = MFI.getObjectOffset(FrameIndex); if (FrameOffset > INT32_MAX || FrameOffset < INT32_MIN) { report_fatal_error(""Frame offsets outside of the signed 32-bit range are not supported""); } if (FrameOffset != 0) { if (SPAdj != 0) { FrameOffset += SPAdj; if (FrameOffset > INT32_MAX || FrameOffset < INT32_MIN) { report_fatal_error(""Frame offsets outside of the signed 32-bit range are not supported""); } }" LLVM,RISCV,770,"void getCalleeSavedRegs(const Function &F, BitVector &SavedRegs, const TargetMachine &TM) { const TargetRegisterInfo *TRI = TM.getRegisterInfo(); const MachineFunction &MF = F.getParent()->getFunction(); const MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *RC = TRI->getRegClass(RISCV::VRRegClassID); for (const MachineBasicBlock &MBB : MF) { for (const MachineInstr &MI : MBB) { if (!TM.getTargetABI().equals("":/9090)) { llvm_unreachable(""Unrecognized ABI""); }" LLVM,RISCV,771, LLVM,RISCV,772,"getFrameRegister(MachineFunction MF, DebugLoc DL) { const RISCVRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); if (MF.hasFrameIndex()) return RegInfo->getFrameRegister(MF); if (MF.hasStackChangeOperand()) { int OperandNum = MF.getStackChangeOperandNum(); Register SPReg = MF.getRegInfo().getOperand(OperandNum).getReg(); if (RegInfo->isStackPointer(SPReg)) return SPReg; }" LLVM,RISCV,773,"bitset<32> RISCVTargetLowering::getReservedRegs(TargetMachine *TM) { const TargetRegisterInfo *TRI = TM->getRegisterInfo(); bitset<32> ReservedRegs; for (unsigned I = 0, E = TRI->getNumRegs(); I != E; ++I) { unsigned Reg = I; if (Reg == RISCV::X0 || Reg == RISCV::X2 || Reg == RISCV::X3 || Reg == RISCV::X4 || Reg == RISCV::X8 || Reg == RISCVABI::getBPReg || Reg == RISCV::VL || Reg == RISCV::VTYPE || Reg == RISCV::VXSAT || Reg == RISCV::VXRM) { 
ReservedRegs.set(Reg); } } return ReservedRegs; }" LLVM,RISCV,774,"bool hasReservedSpillSlot(const Function &F, unsigned Reg) { return false; }" LLVM,RISCV,775,"bool isAsmClobberable(const TargetMachine &TM, MCphysReg Physreg) { if(Target.getArch() == RISCV) { if(Physreg == RISCV || Physreg == RISCV || Physreg == RISCV) { return false; } } return true; }" LLVM,RISCV,776,bool isConstantPhysReg(unsigned PhysReg) { unsigned ZeroReg = RISCV::X0; for (auto &I : MF) { if (I.modifiesRegister(PhysReg)) { return false; } if (I.readsRegister(PhysReg) && !X86II::isImmNonZeroBase(I)) return false; } for (auto &I : MF) { if (X86II::isImmNonZeroBase(I)) if (I.getOperand(0).isReg() && I.getOperand(0).getReg() == PhysReg) return false; } LLVM,RISCV,777,"void RISCVTargetLowering::eliminateFrameIndex(MachineFunction &MF, MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS) { MachineInstr &CurrentMI = *MI; unsigned Opcode = MI->getOpcode(); if (SPAdj != 0) { report_fatal_error(""Unexpected non-zero SPAdj value""); } for (int i = 0; i < CurrentMI.getNumOperands(); i++) { if (CurrentMI.getOperand(i).isReg() && CurrentMI.getOperand(i).isVirtual()) { unsigned Reg = CurrentMI.getOperand(i).getReg(); if (TargetRegisterInfo::isVirtualRegister(Reg) && Reg == getFrameRegister(MF)) { int64_t FrameOffset = CurrentMI.getOperand(i + 1).getImm(); if (FrameOffset > INT32_MAX || FrameOffset < INT32_MIN) { report_fatal_error(""Frame offsets outside of the signed 32-bit range not supported""); }" LLVM,RISCV,778, LLVM,RISCV,779,"getCallPreservedMask(RISCVFunctionInfo &RISCVFI, CallingConv::ID CallConv) { const RISCVABIInfo &ABI = RISCVFI.getTargetLowering().getABI(); MachineInstrBuilder MIB; switch (CallConv) { case CallingConv::C: return 0; case CallingConv::RISCV: return 0; case CallingConv::RISCV_INTR: return MIB.addImm(ABI.getCallPreservedMask( RISCVFI.getMF().getFunction().hasFnAttribute( Attribute::RISCVMInterrupt) ? 
RISCVABI::ABI_ILP32_INTR : RISCVABI::ABI_ILP32F_INTR)); default: llvm_unreachable(""Unsupported calling convention""); } }" LLVM,RISCV,780,"BitVector getReservedRegs(const MachineFunction &MF) const { const TargetSubtargetInfo &STI = MF.getSubtarget(); const TargetRegisterInfo *TRI = STI.getRegisterInfo(); BitVector ReservedRegs(TRI->getNumRegs()); for (MCRegAliasIterator AI(RISCV::X0, TRI, true); AI.isValid(); ++AI) ReservedRegs.set(*AI); for (MCRegAliasIterator AI(RISCV::X2, TRI, true); AI.isValid(); ++AI) ReservedRegs.set(*AI); for (MCRegAliasIterator AI(RISCV::X3, TRI, true); AI.isValid(); ++AI) ReservedRegs.set(*AI); for (MCRegAliasIterator AI(RISCV::X4, TRI, true); AI.isValid(); ++AI) ReservedRegs.set(*AI); for (MCRegAliasIterator AI(RISCV::X8, TRI, true); AI.isValid(); ++AI) ReservedRegs.set(*AI); ReservedRegs.set(RISCVABI::getBPReg(*TRI)); ReservedRegs.set(RISCV::VL); return ReservedRegs; }" LLVM,RISCV,781," RegScavenger *RS) const { assert(SPAdj == 0 && ""Unexpected non-zero SPAdj value""); MachineBasicBlock &MBB = MF.front(); MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); if (!hasFP(MF)) { llvm_unreachable(""eliminateFrameIndex currently requires hasFP""); } if (MFI.getObjectOffset(FI) + MFI.getObjectSize(FI) > 2047 || MFI.getObjectOffset(FI) < -2048) { llvm_unreachable(""Frame offsets outside of the signed 12-bit range not supported""); } unsigned FrameReg = getFrameRegister(MF); MachineFrameInfo::PseudoSourceValue *PSV = MF.getFrameInfo()->getObject(FI); int64_t FrameOffset = MFI.getObjectOffset(FI); if (FrameOffset == 0) { MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false); return; } BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), FrameReg) .addReg(RISCV::X2) .addImm(FrameOffset); MI.RemoveOperand(FIOperandNum); }" LLVM,RISCV,782,"static const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF, void *vatvsivalue) { const RISCVSubtarget &Subtarget = MF->getSubtarget(); const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); std::vector CSRegs; for (MCPhysReg R : RISCV::GPRRegClass) { if (TRI->isCalleeSavedRegister(R)) { CSRegs.push_back(R); } } MCPhysReg *RegArray = MF->allocateCalleeSavedFrame( CSRegs.size() * TRI->getSpillSize(RISCV::GPRRegClass)); assert(RegArray && ""Failed to allocate callee saved frame for RISCV!""); for (unsigned i = 0, e = CSRegs.size(); i != e; ++i) RegArray[i] = CSRegs[i]; return RegArray; }" LLVM,RISCV,783,uint64_t getCallPreservedMask(CallConv CC) { uint64_t PreservedMask = 0; if (CC == CallConv::RISCV) { PreservedMask |= (1 << RISCV::X1); PreservedMask |= (1 << RISCV::X5); PreservedMask |= (1 << RISCV::X9); PreservedMask |= (1 << RISCV::X13); PreservedMask |= (1 << RISCV::X17); PreservedMask |= (1 << RISCV::X21); PreservedMask |= (1 << RISCV::X25); PreservedMask |= (1 << RISCV::X29); } return PreservedMask; } LLVM,RISCV,784,unsigned int getFrameRegister() { Function *curr_function = Builder.GetInsertBlock()->getParent(); const TargetRegisterInfo *reg_info = CGFI.getRegisterInfo(); int frame_index = CGFI.getCalleeSavedFrameSize(); unsigned int frame_register = reg_info->getFrameRegister(*curr_function); return frame_register; } LLVM,RISCV,785,BitVector getReservedRegs(const MachineFunction &MF) { BitVector ReservedRegs; ReservedRegs.set(RISCVABI::getBPReg()); ReservedRegs.set(RISCV::VL()); ReservedRegs.set(RISCV::VTYPE()); ReservedRegs.set(RISCV::VXSAT()); ReservedRegs.set(RISCV::VXRM()); ReservedRegs.set(RISCV::FRM()); ReservedRegs.set(RISCV::FFLAGS()); 
ReservedRegs.set(RISCV::FCSR()); return ReservedRegs; } LLVM,RISCV,786, LLVM,RISCV,787,bitset<64> getReservedRegs() { bitset<64> reservedRegs; reservedRegs[RISCV::X0] = 1; reservedRegs[RISCV::X2] = 1; reservedRegs[RISCV::X3] = 1; reservedRegs[RISCV::X4] = 1; reservedRegs[RISCV::X8] = 1; reservedRegs[RISCVABI::getBPReg()] = 1; reservedRegs[RISCV::VL] = 1; reservedRegs[RISCV::VTYPE] = 1; reservedRegs[RISCV::VXSAT] = 1; reservedRegs[RISCV::VXRM] = 1; reservedRegs[RISCV::FRM] = 1; reservedRegs[RISCV::FFLAGS] = 1; return reservedRegs; } LLVM,RISCV,788,"std::vector getCalleeSavedRegs(const StringRef &TargetTriple) { std::vector CalleeSavedRegs; if (TargetTriple == ""RISCV"") CalleeSavedRegs = {""x31"", ""x30"", ""x29"", ""x28"", ""x27"", ""x26"", ""x25"", ""x24"", ""x23"", ""x22"", ""x21"", ""x20"", ""x19"", ""x18"", ""x17"", ""x16"", ""x15"", ""x14"", ""x13"", ""x12"", ""x11"", ""x10"", ""x9"", ""x8"", ""x7"", ""x6"", ""x5"", ""x4"", ""x3"", ""x2"", ""x1"", ""x0""}; else if (TargetTriple == ""RISCV64"") CalleeSavedRegs = {""x31"", ""x30"", ""x29"", ""x28"", ""x27"", ""x26"", ""x25"", ""x24"", ""x23"", ""x22"", ""x21"", ""x20"", ""x19"", ""x18"", ""x17"", ""x16"", ""x15"", ""x14"", ""x13"", ""x12"", ""x11"", ""x10"", ""x9"", ""x8"", ""x7"", ""x6"", ""x5"", ""x4"", ""x3"", ""x2"", ""x1"", ""x0""}; else if (TargetTriple == ""interrupt"") CalleeSavedRegs = {""x31"", ""x30"", ""x29"", ""x28"", ""x27"", ""x26"", ""x25"", ""x24"", ""x23"", ""x22"", ""x21"", ""x20"", ""x19"", ""x18""}; return CalleeSavedRegs; }" LLVM,RISCV,789, LLVM,RISCV,790,Instruction* eliminateFrameIndex(Instruction* inst) { RISCVInstruction* riscvInst = dynamic_cast(inst); if (riscvInst) { int frameIndex = riscvInst->getFrameIndex(); int spOffset = riscvInst->getSPOffset(); int stackSize = riscvInst->getStackSize(); riscvInst->setFrameIndex(frameIndex); riscvInst->setSPOffset(spOffset); riscvInst->setStackSize(stackSize); return riscvInst; } else { return inst; } } LLVM,RISCV,791, LLVM,RISCV,792,"uint64_t getCallPreservedMask(const RISCVSubtarget *Subtarget, CallingConv::ID CC) { if (!CCSupportedByRISCV(CC)) return 0; uint64_t Mask = 0xFFFFFFFFFFFFE000; if (CC == CallingConv::C || CC == CallingConv::CXX_FAST_TLS) Mask &= ~(uint64_t(RISCV::X8) | uint64_t(RISCV::X9)); else Mask |= 0xFFFFFFFFFFFFFFFF; return Mask; }" LLVM,RISCV,793, LLVM,RISCV,794,BitVector getReservedRegs() { BitVector ReservedRegisters(RISCV::NumTargetRegisters); ReservedRegisters[RISCV::zero] = true; ReservedRegisters[RISCV::zero_64] = true; ReservedRegisters[RISCV::s0] = true; ReservedRegisters[RISCV::s0_64] = true; ReservedRegisters[RISCV::fp] = true; ReservedRegisters[RISCV::fp_64] = true; ReservedRegisters[RISCV::sp] = true; ReservedRegisters[RISCV::sp_64] = true; ReservedRegisters[RISCV::tp] = true; ReservedRegisters[RISCV::tp_64] = true; ReservedRegisters[RISCV::gp] = true; ReservedRegisters[RISCV::gp_64] = true; return ReservedRegisters; } LLVM,RISCV,795,TargetRegisterClass* getPointerRegClass() { return &RISCV::GPRRegClass; } LLVM,RISCV,796,getReservedRegs() { bitset ReservedRegs; ReservedRegs.set(RISCV_X0); ReservedRegs.set(RISCV_X1); ReservedRegs.set(RISCV_X2); ReservedRegs.set(RISCV_X3); ReservedRegs.set(RISCV_X4); ReservedRegs.set(RISCV_X8); ReservedRegs.set(RISCV_VCFG); return ReservedRegs; } bitset Reserved = getReservedRegs(); if (Reserved[RISCV_X3]) { } else { } LLVM,RISCV,797,bitset<32> getReservedRegs(TargetRegisterInfo TRI) { bitset<32> reservedRegs; reservedRegs.set(RISCV::X0); reservedRegs.set(RISCV::X2); reservedRegs.set(RISCV::X3); 
reservedRegs.set(RISCV::X4); reservedRegs.set(RISCV::X8); return reservedRegs; } LLVM,RISCV,798," RegScavenger *RS) const { const RISCVInstrInfo *TII = Subtarget.getInstrInfo(); if (MI->getOperand(0).isFI()) { int FI = MI->getOperand(0).getIndex(); int Offset = TII->getFrameIndexOffset(MI) + SPAdj; unsigned Reg = MI->getOperand(1).getReg(); MI->getOperand(0).ChangeToRegister(RISCV::X2, false); MI->getOperand(1).ChangeToImmediate(Offset); if (Offset > 32767 || Offset < -32768) { report_fatal_error(""Frame offsets outside of the signed 32-bit range not supported""); } if (!RISCV::GPRRegClass.contains(Reg)) { report_fatal_error(""Int32 expected""); } MachineInstrBuilder MIB = BuildMI(*MF, MI->getDebugLoc(), TII->get(RISCV::ADDI), Reg).addReg(RISCV::X2).addImm(Offset); MI->eraseFromParent(); } else { if (SPAdj != 0) { report_fatal_error(""Unexpected non-zero SPAdj value""); } } }" LLVM,RISCV,799, LLVM,RISCV,800," CallingConv::ID CC) { switch (CC) { default: report_fatal_error(""Unsupported calling convention""); case CallingConv::C: case CallingConv::Fast: case CallingConv::GHC: case CallingConv::HiPE: case CallingConv::Cold: case CallingConv::WebKit_JS: return getRISCVCallPreservedMask(); case CallingConv::RISCV: case CallingConv::Interrupt: return getRISCVInterruptCallPreservedMask(); } }" LLVM,RISCV,801,RISCV::Register getFrameRegister(RISCV::TargetSpecificValue value){ if(value == RISCV) return RISCV::X8; else if(value == RISCV::X8) return RISCV::X2; else return RISCV::X2; } LLVM,RISCV,802, LLVM,RISCV,803,BitVector getReservedRegs(const MachineFunction &MF) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); const auto &RI = *static_cast(TRI); BitVector ReservedRegs(RI.getNumRegs()); ReservedRegs.set(RISCV::X0); ReservedRegs.set(RISCV::X1); ReservedRegs.set(RISCV::X2); ReservedRegs.set(RISCV::X3); ReservedRegs.set(RISCV::X4); ReservedRegs.set(RISCV::X8); return ReservedRegs; } LLVM,RISCV,804,bool isConstantPhysReg(unsigned PhysReg) { if (PhysReg == RISCV::X0) { return true; } return false; } LLVM,RISCV,805,bool trackLivenessAfterRegAlloc(const MachineFunction & MF) { const TargetMachine &TM = MF.getTarget(); const RISCVButlerTargetMachine &RTM = static_cast(TM); const RISCVTargetLowering &TLI = *RTM.getSubtargetImpl()->getTargetLowering(); if (TLI.trackLivenessAfterRegAlloc()) return true; return false; } LLVM,RISCV,806,"std::string getPassName() { return ""RISCV""; }" LLVM,RISCV,807,"bool runOnMachineFunction(MachineFunction &MF, const RISCVSubtarget &STI) { MF.emitFunctionEntryLabel(); MachineModuleInfo &MMI = MF.getMMI(); MachineConstantPool *MCP = MMI.getConstantPool(); for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { switch (MI.getOpcode()) { case RISCV::ADD: { ... break; } case RISCV::SUB: { ... 
break; } default: { MF.emitInstruction(&MI); break; } } } MF.emitFunctionFragments(); return true; }" LLVM,RISCV,808,"std::string getPassName() { return ""RISCV RI5CY IR pass""; }" LLVM,RISCV,809,"bool RISCVMachineFunctionPass::runOnFunction(MachineFunction &MF) { RISCVFunctionInfo *FI = MF.getInfo<RISCVFunctionInfo>(); errs() << RISCVTargetName << "": runOnFunction("" << FI->getName() << "")\n""; return true; }" LLVM,RISCV,810, LLVM,RISCV,811,"void EmitTargetCodeForMemmove(TargetMachine &TM, CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, llvm::Value *Size) { const DataLayout &DL = CGF.getTargetData(); llvm::LLVMContext &Ctx = CGF.CGM.getLLVMContext(); llvm::Type *IntPtrTy = DL.getIntPtrType(Ctx); auto *TgtRISCV = llvm::RISCV::getTM(TgtRISCV, TM); auto *MvInst = llvm::RISCV::getMemmoveInst(TgtRISCV); uint64_t InstSize = DL.getTypeAllocSize(MvInst->getType()); llvm::Function *Func = CGF.CGM.getIntrinsic(llvm::Intrinsic::riscv_memmove, IntPtrTy); SmallVector Args; Args.push_back(DestPtr); Args.push_back(SrcPtr); if (Size->getType() != IntPtrTy) Size = llvm::IRBuilder<>(CGF.CurFunc->getEntryBlock().getTerminator()) .CreateIntCast(Size, IntPtrTy, false); Args.push_back(Size); llvm::CallInst *Call = CGF.EmitRuntimeCall(Func, Args); Call->setAlignment(MvInst->getAlignment()); Call->addAttribute(1, ""riscv""); CGF.Builder.CreateRetVoid(); }" LLVM,RISCV,812,"void addUses(Instruction *I, int regNum){ if(!I){ errs() << ""Invalid LLVM Instruction Pointer\n""; return; } if(regNum < 0 || regNum > 31){ errs() << ""Invalid RISCV Register Number\n""; return; } I->addUse(RISCV::getRegister(regNum)); return; }" LLVM,RISCV,813,"void RISCVTargetMachine::runOnMachineFunction(MachineFunction &MF){ const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>(); const RISCVFunctionInfo *FuncInfo = MF.getInfo<RISCVFunctionInfo>(); MachineBasicBlock *MBB = MF.CreateMachineBasicBlock(); for (const MachineBasicBlock::instr_iterator &I = MBB->instr_begin(), E = MBB->instr_end(); I != E; ++I) { if (I->isRISCV()) { const RISCVMachineInstr &MI = *I->getRISCV(); if (MI.getOpcode() == RISCV::ADDIW) { const MachineOperand &Op1 = MI.getOperand(0); const MachineOperand &Op2 = MI.getOperand(1); if (Op1.isReg() && Op2.isReg()) { unsigned Reg1 = Op1.getReg(); unsigned Reg2 = Op2.getReg(); if (RISCV::ZERO == Reg1) { if (RISCV::SImm7 == Reg2) { MI.setDesc(RISCV::ADDI); MI.getOperand(0).ChangeToRegister(RISCV::ZERO, false); MI.getOperand(1).ChangeToRegister(RISCV::SImm7, false); } } } } } } MF.push_back(MBB); }" LLVM,RISCV,814,"std::string getPassName() { return ""RISCV sext.w Removal""; }" LLVM,RISCV,815,"void runOnMachineFunction(MachineFunction &MF) { RISCVTargetMachine &TM = static_cast<RISCVTargetMachine &>(MF.getTarget()); const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); MachineBasicBlock &MBB = MF.front(); MachineFunction::iterator FI = MF.begin(); const TargetRegisterInfo *TRI = TM.getRegisterInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); SmallVector REXToReplace; for (MachineBasicBlock::iterator MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) { MachineInstr &MI = *MBBI; if (MI.getOpcode() == RISCV::ADDIW) { MachineOperand &Dest = MI.getOperand(0); MachineOperand &Src = MI.getOperand(1); if (Src.isImm()) { int64_t Val = Src.getImm(); if (Val < INT32_MIN || Val > INT32_MAX) { MachineInstr *NewMI = BuildMI(MF, MI.getDebugLoc(), MI.getDesc(), Dest.getReg()); NewMI->addOperand(MachineOperand::CreateImm(0));
NewMI->addAnnotation(0, ""Removing redundant sign-extension""); REXToReplace.push_back(&MI); } } } }" LLVM,RISCV,816,getInstrInfo(const RISCVTargetMachine &TM) { return TM.getSubtargetImpl()->getInstrInfo(); } LLVM,RISCV,817, LLVM,RISCV,818,"class RISCVSubtargetFeatures : public FeatureBitset { public: RISCVSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS) { InitializeSubtargetDependencies(CPU, TuneCPU, FS); } void InitializeSubtargetDependencies(StringRef CPU, StringRef TuneCPU, StringRef FS) { if (CPU == ""riscv32"") ToggleFeature(RISCV::FeatureA); else if (CPU == ""riscv64"") ToggleFeature(RISCV::FeatureB); if (TuneCPU == ""tuneCPU1"") ToggleFeature(RISCV::FeatureC); ToggleFeature(RISCV::FeatureD); setName(CPU + "","" + TuneCPU + "","" + FS); }" LLVM,RISCV,819,"void enableMachineScheduler(unsigned int RISCV) { TargetMachine *TM = TargetRegistry::lookupTarget(""riscv"", """", """"); MachineSchedPass *MS = TM->createMachineSchedPass(); MS->setTargetMachine(*TM); MS->setRISCV(RISCV); PassManager PM; PM.add(MS); for (auto &MF : M) { if (MF->getSubtarget().getTargetTriple().getArch() == Triple::riscv) { PM.run(MF); } } }" LLVM,RISCV,820,TargetInstrInfo RISCVTargetMachine::getInstrInfo() const { return RISCVInstrInfo(*this); } LLVM,RISCV,821,"enum RISCVProcFamily { RV32 = 1, RV64, RV128 }; RISCVProcFamily getProcFamily() { std::string targetSpecificValue = llvm::Triple::getArchTypeName(llvm::Triple::RISCV); if(targetSpecificValue == ""riscv32"") { return RV32; } else if(targetSpecificValue == ""riscv64"") { return RV64; } else if(targetSpecificValue == ""riscv128"") { return RV128; } else { return UNKNOWN; } }" LLVM,RISCV,822,"const RISCVRegisterInfo *getRegisterInfo(const TargetInstrInfo &tii, const TargetRegisterInfo &tri) { return tii.getRegisterInfo(RISCV); }" LLVM,RISCV,823, LLVM,RISCV,824, LLVM,RISCV,825," const std::string &FS) { int RISCVFeature = ST.getRISCVFeature(); int RISCVTuneCPU = ST.getRISCVTuneCPU(); bool is64Bit = ST.isTarget64Bit(); RISCVFeature |= RISCV::featureRV64; RISCVTuneCPU |= RISCV::tuneCPUgenericRV64; if (FS == ""generic-rv64"") { RISCVFeature.append(""rv64""); RISCVTuneCPU |= RISCV::tuneCPUgenericRV64; } else if (FS == ""generic-rv32"") { RISCVFeature.append(""rv32""); RISCVTuneCPU |= RISCV::tuneCPUgenericRV32; } else { } RISCVSubtarget::RISCVTargetValueType TargetValues[] = { RISCVFeature, RISCVTuneCPU, is64Bit ? MVT::i64 : MVT::i32, is64Bit ? 
64 : 32 }; ST.initializeSubtargetDependencies(CPU, FS, TM.getTargetTriple(), TargetValues); }" LLVM,RISCV,826," const char *featureString, int ABI) { const char **RISCV = CPU; const char **TuneRISCV = TuneCPU; FeatureString = featureString; RISCVABI = ABI; const char *genericRV64 = ""generic-rv64""; const char *genericRV32 = ""generic-rv32""; MVT::i64 = 64; RISCVABI::computeTargetABI = computeTargetABI; RISCVFeatures::validate = validate; }" LLVM,RISCV,827,"void initializeSubtargetDependencies(CPU &cpu, TuneCPU &tuneCPU, std::string featureString) { cpu.setName(""RISCV""); tuneCPU.setName(""RISCV""); cpu.setFeatureString(featureString); cpu.setDefaultMVT(MVT::i64); cpu.setRegisterWidth(64); cpu.setTargetABI(RISCVABI::computeTargetABI); cpu.setTargetFeatures(RISCVFeatures::validate); }" LLVM,RISCV,828," const std::vector& FeatureStr) { RISCVSubtarget* ST = new RISCVSubtarget(CPU, TuneCPU); RISCVFeatures DefaultFeatures; DefaultFeatures.AddFeature(""generic-rv64"", ""Target has 64-bit registers"", *ST); DefaultFeatures.AddFeature(""generic-rv32"", ""Target has 32-bit registers"", *ST); ST->initializeSubtargetDependencies(CPU, TuneCPU, FeatureStr, DefaultFeatures); return *ST; }" LLVM,RISCV,829,"void initializeSubtargetDependencies(TargetCPU, TuneCPU, feature_string) { Triple TargetTriple(""riscv"", ""riscv"", ""riscv""); Subtarget.getTriple(TargetTriple); Subtarget.setArch(""generic-rv64""); Subtarget.setArch(""generic-rv32""); Subtarget.setRegisterWidth(MVT::i64, 64); Subtarget.setTargetABI(RISCVABI::computeTargetABI); Subtarget.setFeatureString(RISCVFeatures::validate); }" LLVM,RISCV,830, LLVM,RISCV,831, LLVM,RISCV,832,"function getRegBankInfo(target_specific_val, generic_target_specific_val) { if(target_specific_val == ""RISCV"") { register_bank_info = get_info_for_RISCV_backend() if(register_bank_info != null) { return register_bank_info } else { return ""Register bank information not available for RISCV backend"" } } else { return ""RISCV backend not selected"" } }" LLVM,RISCV,833,"void initializeSubtargetDependencies(StringRef CPU, StringRef TuneCPU, StringRef Features) { if (CPU == ""generic"") { report_fatal_error(""CPU 'generic' is not supported. 
Use a specific RISCV CPU instead.""); } if (Features.empty()) { Features = ""generic-rv64,generic-rv32,generic""; } if (CPU == ""generic-rv64"" || CPU == ""generic"") { MVT Support1; Support1 = MVT::i64; int Support2 = 64; ABI TargetABI; TargetABI = RISCVABI::computeTargetABI(); RISCVFeatures::validate(); } else if (CPU == ""generic-rv32"") { MVT Support1; Support1 = MVT::i32; int Support2 = 32; ABI TargetABI; TargetABI = RISCVABI::computeTargetABI(); RISCVFeatures::validate(); } else { report_fatal_error(""Unsupported CPU for the RISCV target.""); } CPU = TuneCPU; TuneCPU = CPU; Subtarget.initializeFeatures(CPU, Features); }" LLVM,RISCV,834,"Function addInstSelector() { InstructionSelectorPass pass = new InstructionSelectorPass(); pass.addTarget(""RISCV""); TargetSpecificValue riscv1 = new TargetSpecificValue(""RISCV"", ""RISCV""); TargetSpecificValue riscv2 = new TargetSpecificValue(""RISCV"", ""RISCV""); TargetSpecificValue riscv3 = new TargetSpecificValue(""RISCV"", ""RISCV""); TargetSpecificValue riscv4 = new TargetSpecificValue(""RISCV"", ""RISCV""); pass.addTargetSpecificValue(riscv1); pass.addTargetSpecificValue(riscv2); pass.addTargetSpecificValue(riscv3); pass.addTargetSpecificValue(riscv4); LLVMBackend.addPass(pass); }" LLVM,RISCV,835," MCSchedule &Schedule) { PM.add(createRISCVPreEmitPass(T, TM, Schedule)); }" LLVM,RISCV,836,llvm::TargetPassConfig *createPassConfig(llvm::TargetMachine &TM) { RISCVPassConfig *PassConfig = new RISCVPassConfig(&TM); PassConfig->addTargetPasses(); return PassConfig; } LLVM,RISCV,837, LLVM,RISCV,838, LLVM,RISCV,839,const RISCVMCSubtargetInfo &TargetRISCV::getSubtargetImpl() const { return *Subtarget; } LLVM,RISCV,840,"void convertFuncInfoToYAML(MachineFunctionInfo *MFI, yaml::Stream &Stream) { MachineFunctionInfoYAML InfoYAML; InfoYAML.Triple = ""riscv64""; InfoYAML.Arch = ""riscv""; InfoYAML.SubArch = ""riscv""; InfoYAML.ABI = ""riscv""; InfoYAML.CPU = ""riscv""; InfoYAML.writeToStream(Stream); }" LLVM,RISCV,841,"std::string createDefaultFuncInfoYAML() { llvm::yaml::Output yamlOut; yamlOut << llvm::yaml::beginMap; yamlOut << llvm::yaml::Key << ""RISCV""; yamlOut << llvm::yaml::Value << ""RISCV""; yamlOut << llvm::yaml::Key << ""RISCV""; yamlOut << llvm::yaml::Value << ""RISCV""; yamlOut << llvm::yaml::Key << ""RISCV""; yamlOut << llvm::yaml::Value << ""RISCV""; yamlOut << llvm::yaml::endMap; return yamlOut.str(); }" LLVM,RISCV,842,if (!TTI) { return nullptr; } LLVM,RISCV,843,"void parseMachineFunctionInfo(std::map targetInfo) { if (targetInfo[""CPU""] == ""riscv"") { std::string val1 = targetInfo[""value1""]; std::string val2 = targetInfo[""value2""]; std::string val3 = targetInfo[""value3""]; std::string val4 = targetInfo[""value4""]; MachineFunctionInfo mfi = new MachineFunctionInfo(); mfi.setValue1(val1); mfi.setValue2(val2); mfi.setValue3(val3); mfi.setValue4(val4); } else { } }" LLVM,RISCV,844,"PassConfig* createPassConfig() { PassConfig* passConfig = new PassConfig(); passConfig->setTargetTriple(""riscv64-linux-gnu""); passConfig->setTargetCPU(""riscv64""); return passConfig; }" LLVM,RISCV,845,"PassConfiguration *createPassConfig(RISCVTargetMachine &TM) { RISCVPassConfig *PC = new RISCVPassConfig(TM); PC->addTargetPassConfig(new RISCVTargetPassConfig(TM)); PC->addPass(MachineSchedRegistry::createPassConfig(TM)); PC->addPass(createMIROptimizePass()); PC->addPass(createMachineBlockPlacementPass()); PC->addPass(createIfConverterPass()); PC->addPass(createTailDuplicatePass(nullptr, false)); PC->addPass(createMachinePostDominatorTreePass()); 
PC->addPass(createMachineFunctionPrinterPass()); PC->addPass(createRISCVRegisterAllocatorPass()); return PC; }" LLVM,RISCV,846,"void addGlobalInstructionSelect(RISCVTargetMachine &TM) { MachinePassManager PM(&TM); PM.add(new RISCVInstructionSelect()); PM.runOnAllFunctions(*TM.getMachineFunction()); dbgs() << ""Added a global instruction selector pass for RISCV backend\n""; }" LLVM,RISCV,847,void addPreRegAlloc(const TargetMachine &TM) { if (!TM.getPreRegAllocPasses().empty()) { for (auto Pass : TM.getPreRegAllocPasses()) { Pass->runOnFunction(function); } } if (!TM.getTargetPrePreRegAllocPasses().empty()) { for (auto Pass : TM.getTargetPrePreRegAllocPasses()) { Pass->runOnFunction(function); } } handleAllErrors([&]() { function->runPasses(); }); } LLVM,RISCV,848,"void addIRTranslator(llvm::PassRegistry* PassReg, StringRef TargetTriple) { Target* RISCVTarget = TargetRegistry::lookupTarget(TargetTriple, Error); TargetOptions opt; auto RM = Optional; auto TM = Optional; TargetMachine* RISCVTargetMachine = RISCVTarget->createTargetMachine(TargetTriple, ""riscv"", """", opt, RM, TM); auto Translator = createIRTranslator(RISCVTargetMachine); PassReg->registerPass(*Translator); }" LLVM,RISCV,849,"void addGlobalInstructionSelect(RISCV, RISCV){ PassManager pm; TargetMachine *tm = RISCV; GlobalISelPass *gis = tm->createGlobalISelPass(); pm.addPass(gis); pm.run(RISCV); }" LLVM,RISCV,850,"void addIRPasses(RISCVTargetMachine &TM, PassManagerBase &PM) { PM.add(createCFGSimplificationPass()); PM.add(createLowerAtomicPass()); PM.add(createPromoteMemoryToRegisterPass()); PM.add(createGlobalDCEPass()); PM.add(createConstantPropagationPass()); PM.add(createDeadArgEliminationPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createDeadStoreEliminationPass()); PM.add(createDeadInstEliminationPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); PM.add(createMergedLoadStoreMotionPass()); PM.add(createMemCpyOptPass()); PM.add(createLoopSimplifyPass()); PM.add(createIndVarSimplifyPass()); PM.add(createLICMPass()); PM.add(createLoopUnrollPass()); PM.add(createLoopRotatePass()); PM.add(createLoopDistributionPass()); PM.add(createRISCVISelDag(TM, TM.getOptLevel())); PM.add(createRISCVExpandPseudoPass()); PM.add(createRISCVFrameLoweringPass()); PM.add(createRISCVOptimizeShadowSOEPPass()); PM.add(createRISCVExpandAtomicPass()); PM.add(createRISCVOptimizeMIRPPass()); PM.add(createRISCVConstantFoldingPass()); PM.add(createRISCVReduceComplexityPass()); PM.add(createRISCVExpandPredicatesPass()); PM.add(createRISCVExpandAssemblerPass()); PM.add(createRISCVOptimizeTailsPass()); PM.add(createRISCVLoopUnrollingPass()); PM.add(createRISCVExpandGeneralPatternsPass()); }" LLVM,RISCV,851,void addIRTranslator(Module& M) { RISCVTargetMachine TM(...); Pass* IRTranslator = TM.createIRTranslator(); M.addPass(IRTranslator); } LLVM,RISCV,852, LLVM,RISCV,853," TargetLibraryInfo *TLI, TargetIRAnalysis *CAM) { if (!MF.getFunction().getAttributes().hasAttribute( AttributeList::FunctionIndex, Attribute::OptimizeNone)) { return false; } if (!ILP && !MF.getSubtarget().allUsesMustUseS12() && !MF.getSubtarget().allUsesMustUseS12()) { return false; } MF.getProperties().set(MachineFunctionProperties::Property::SSA); MF.getProperties().set(MachineFunctionProperties::Property::NoVRegAlloc); MF.getProperties().set(MachineFunctionProperties::Property::GDNUPatterns); return true; }" LLVM,RISCV,854," RegisterClassInfo &RCI) { } The addPostRegAlloc function can be implemented to perform any target-specific 
optimizations or transformations that are required after the register allocation pass pipeline. This could include optimizing specific instructions, manipulating register allocations, or performing code generation for target-specific instructions. The RISCV target-specific values, RISCV, RISCV, and RISCV, can be accessed through the MachineFunction object. These values can be used to identify the specific RISCV registers used by the target and to optimize the code accordingly. void RISCVTargetMachine::addPostRegAlloc (const RISCVTargetMachine &TM, MachineFunction &MF, RegisterClassInfo &RCI) { const RISCVTargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering(); const RISCVRegisterInfo *RI = TM.getSubtargetImpl()->getRegisterInfo(); unsigned FrameReg = RI->getFrameRegister(MF); }" LLVM,RISCV,855, LLVM,RISCV,856,void RISCVTargetMachine::addPreRegAlloc(PassManagerBase &PM) { PM.add(createRISCVExpandPseudoPass()); PM.add(createRISCVISelDag(RISCVReserveX8)); PM.add(createRISCVLoadStoreOptimizationPass()); PM.add(createRISCVRenameRegisterPass()); } LLVM,RISCV,857,"void addPreSched2(RISCV, RISCV) { BasicBlock *preSchedBlock = BasicBlock::Create(); for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) { BasicBlock *BB = &*FI; if (BB->getTerminator()->getOpcode() == RISCV || BB->getTerminator()->getOpcode() == RISCV) { preSchedBlock->getInstList().splice(preSchedBlock->end(), BB->getInstList(), BB->begin(), BB->end()); } }" LLVM,RISCV,858,void addRegBankSelect() { TargetMachine *TM = getTargetMachine(); const RISCVSubtarget *ST = TM->getSubtarget(); RegisterBankSelectorPass *RBSP = new RegisterBankSelectorPass(ST->getRISCV()); TM->addPass(RBSP); } LLVM,RISCV,859,"TargetSubtargetInfo &getSubtargetImpl() { const RISCVTargetMachine &TM = static_cast(getTargetMachine()); const Triple &TT = TM.getTargetTriple(); std::string CPU = TM.getTargetCPU(); std::string TuneCPU = TM.getTargetABI(); const FeatureBitset &Features = TM.getFeatureBits(); RISCVABI::ABI ABI = RISCVABI::getTargetABI(TT.getArch()); if (ABI == RISCVABI::ABI_Unknown) { ABI = TM.getModule().getDataLayout().getABIEnum(); } return *new RISCVSubtarget(TT, CPU, TuneCPU, Features, ABI); }" LLVM,RISCV,860,TargetTransformInfo getTargetTransformInfo(const TargetMachine *TM) { if (!TM->getTargetTriple().isRISCV()) { return TargetTransformInfo(); } RISCVTargetTransformInfo TTI; return TTI; } LLVM,RISCV,861,"bool isNoopAddrSpaceCast(uint32_t SrcAS, uint32_t DestAS) { if (SrcAS == RISCV && DestAS == RISCV) { return true; } return false; }" LLVM,RISCV,862,"namespace llvm { class RISCVPreEmitPass : public MachineFunctionPass { public: static char ID; RISCVPreEmitPass() : MachineFunctionPass(ID) {} bool runOnMachineFunction(MachineFunction &MF) override { return false; } StringRef getPassName() const override { return ""RISCV Pre-Emit Pass""; } }; char RISCVPreEmitPass::ID = 0; FunctionPass *createRISCVPreEmitPass() { return new RISCVPreEmitPass(); } INITIALIZE_PASS(RISCVPreEmitPass, ""addPreEmitPass"", ""RISCV Pre-Emit Pass"", false, true) } extern ""C"" void LLVMInitializeRISCVTargetPreEmitPass() { llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry(); llvm::initializeRISCVPreEmitPass(Registry); }" LLVM,RISCV,863, LLVM,RISCV,864, LLVM,RISCV,865, LLVM,RISCV,866,"llvm::PassManager addIRPasses() { llvm::Triple TargetTriple(llvm::sys::getProcessTriple()); TargetTriple.setArchName(""riscv32""); llvm::TargetLibraryInfoWrapperPass *TLIWP = new llvm::TargetLibraryInfoWrapperPass(TargetTriple); llvm::PassManager PM; 
llvm::addTargetSpecificPasses(PM, TargetTriple); llvm::addCommonTargetConfigPasses(PM, TLIWP); return PM; }" LLVM,RISCV,867,"bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) { if (SrcAS == RISCV && DestAS == RISCV) { return true; } else { return false; } }" LLVM,RISCV,868,"TargetSubtargetInfo& RISCVTargetMachine::getSubtargetImpl(const Triple &TT, StringRef CPU, StringRef FS) { LLVMContext &Ctx = getLLVMContext(); if (!Subtarget) { if (TT.isArch32Bit()) Subtarget = llvm::createRISCVSubtargetImpl(Ctx, TT.getArchName(), CPU,""RISCV"", ""target-cpu"", FS); else Subtarget = llvm::createRISCVSubtargetImpl(Ctx, TT.getArchName(), CPU, FS); if (!CPU.empty()) { ParseRISCVCPUFeatures(""+reserve-ra"", CPU, true); } RISCVSubtarget *RI = cast(Subtarget); RI->setCPU(CPU); RI->setFeatureString(FS); return *Subtarget; }" LLVM,RISCV,869," Modules &M, MCStreamer &MCOS, AsmPrinter &AP) { RISCV::RISCV, RISCV::RISCV, RISCV::RISCV, RISCV::RISCV, RISCV::RISCV }" LLVM,RISCV,870,"if (TM.getTargetTriple().getArch() != Triple::riscv) { return; } ``` Next, we need to get the MachineRegisterInfo(MRI) object from DAGisel, which will be used to define new virtual registers for RISCV. ```cpp MachineRegisterInfo &MRI = DAGisel.getMF()->getRegInfo(); ``` Now, we can start adding the target-specific virtual registers for RISCV. For this, we will use the `addReg()` function of MRI, which takes in two parameters, register number and allocation flags. The code for adding target-specific virtual registers would be: ```cpp for (unsigned i = 0; i < 32; ++i) { MRI.addReg(RISCV::X0 + i, RegState::Virtual | RegState::Preserved); } if (STI.hasFP()) { for (unsigned i = 0; i < 32; ++i) { MRI.addReg(RISCV::F0 + i, RegState::Virtual | RegState::Preserved); } }" LLVM,RISCV,871,bool RISCVTargetMachine::addPreRegAlloc(FunctionPassManager &FPM) { FPM.add(new RISCVPass1()); FPM.add(new RISCVPass2()); FPM.add(new RISCVPass3()); FPM.add(new RISCVPass4()); return true; } LLVM,RISCV,872,"TargetSubtargetInfo &RISCVTargetLowering::getSubtargetImpl(const Function &F) const { RISCVSubtarget STI = RISCVSubtarget(RISCVTargetMachine &TM, const Triple &TargetTriple, StringRef CPU, StringRef FS, const TargetOptions &Options, Optional Features, Optional ABIName, Optional ABI); if (CPU.empty()) CPU = getDefaultCPU(Options); RISCVGenSubtargetInfo::InitSchedModel(STI, CPU); auto &Subtarget = *new RISCVSubtarget(RISCVTargetMachine &TM, const Triple &TargetTriple, CPU, FS, Options, Features, ABIName, ABI); return Subtarget; }" LLVM,RISCV,873,"void addPreEmitPass2(ModulePassManager &MPM, RISCVTargetMachine &TM, MachineModuleInfo &MMI) { RISCVSubtarget &STI = TM.getSubtarget(); }" LLVM,RISCV,874,"void addISelPrepare(PassManagerBase &PM, TargetMachine &TM) { PM.add(createEarlyCSEPass()); PM.add(createJumpThreadingPass()); PM.add(createBranchFolderPass()); PM.add(createLICMPass()); PM.add(createLoopSimplifyPass()); }" LLVM,RISCV,875, LLVM,RISCV,876, LLVM,RISCV,877,"void addInstSelector(TargetOptions Target) { TargetMachine *TM = Target.createTargetMachine(TRIPLE, CPU, FEATURES, Options, RM, CM, OA); PassRegistry &Registry = *PassRegistry::getPassRegistry(); initializeRISCVExpandPseudoPass(Registry); initializeRISCVInstrInfoPass(Registry); initializeRISCVExpandPseudoPass(Registry); initializeRISCVBranchExpansionPass(Registry); addPass(createRISCVISelDAG(Target, *TM)); }" LLVM,RISCV,878,"void addIRPasses(TargetMachine &TM, PassManagerBase &PM) { if (TM.getTargetTriple().getArch() == Triple::riscv) { PM.add(createInstructionCombiningPass()); 
PM.add(createDeadCodeEliminationPass()); PM.add(createCFGSimplificationPass()); } }" LLVM,RISCV,879,"void addPreEmitPass(RISCVTargetMachine &TM) { StringRef TargetName = TM.getTargetTriple().getArchName(); if(TargetName == ""RISCV"") { TM.addPass(createRISCVSpecificPass()); TM.addPass(createRISCVAnotherSpecificPass()); } return; }" LLVM,RISCV,880,"void addPreEmitPass2(const RISCVSubtarget &STI, MachineFunction &MF, MachineModuleInfo &MMI) { Function &F = MF.getFunction(); PassManagerBuilder PMBuilder; PMBuilder.Inliner = createRISCVSMLoopInvariantCodeMotionPass(); PMBuilder.DisableUnrollLoops = true; PMBuilder.DisableUnitAtATime = true; PMBuilder.OptLevelSize = 0; PMBuilder.VerifyInput = true; PMBuilder.VerifyOutput = true; PMBuilder.LoopVectorize = true; PMBuilder.SLPVectorize = true; PMBuilder.MergeFunctions = true; TargetLibraryInfoImpl TLII(Triple(MF.getTarget().getName())); PMBuilder.LibraryInfo = &TLII; LegacyPassManager PM; PMBuilder.populateModulePassManager(PM); PM.run(MF.getFunction()); MF.createAddPreEmitPass2(STI); MMI.createAddPreEmitPass2(STI); }" LLVM,RISCV,881,void RISCVTarget::addPreRegAlloc(PassManagerBase &PM) { PM.add(createGVNPass()); PM.add(createDeadCodeEliminationPass()); PM.add(createLoopUnrollPass()); } LLVM,RISCV,882, LLVM,RISCV,883,"llvm::NamedMDNode* getModuleMetadata(llvm::Module& M) { llvm::NamedMDNode* RISCVMetadata = M.getNamedMetadata(""RISCV""); if (!RISCVMetadata) { return nullptr; } llvm::MDNode* FirstOp = RISCVMetadata->getOperand(0); if (!FirstOp || !FirstOp->isMDNode()) { return nullptr; } llvm::Value* SecondOp = FirstOp->getOperand(1); if (!SecondOp || !SecondOp->getType()->isIntegerTy()) { return nullptr; } int SmallDataLimit = SecondOp->getUniqueInteger().getLimitedValue(); llvm::NamedMDNode* RISCVSpecificMetadata = llvm::NamedMDNode::Create( M.getContext(), ""RISCVSpecific"", RISCVMetadata); llvm::APInt Value(32, SmallDataLimit); llvm::ConstantAsMetadata* MDValue = llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(M.getContext(), Value)); llvm::MDNode* Metadata = llvm::MDNode::get(M.getContext(), {MDValue, MDValue}); RISCVSpecificMetadata->addOperand(Metadata); return RISCVSpecificMetadata; }" LLVM,RISCV,884,"Section* getSectionForConstant(const Constant* C, RISCVSubtarget& STI) { SectionKind Kind = C->getSectionKind(); switch (Kind) { case SectionKind::getText(): { StringRef SectionName = "".text""; const MCAsmInfo *MAI = STI.getMCAsmInfo(); const MCSection *S = MAI->getNonexecutableStackSection(STI); unsigned Attributes = STI.getTargetLowering()->getStackSectionFlags(); MCContext &Context = STI.getTargetLowering()->getContext(); return Context.getELFSection(SectionName, S, Attributes, 0, ""text""); } case SectionKind::getReadOnly(): { StringRef SectionName = "".rodata""; const MCAsmInfo *MAI = STI.getMCAsmInfo(); const MCSection *S = MAI->getNonexecutableStackSection(STI); unsigned Attributes = STI.getTargetLowering()->getStackSectionFlags(); MCContext &Context = STI.getTargetLowering()->getContext(); return Context.getELFSection(SectionName, S, Attributes, 0, ""rodata""); } case SectionKind::getData(): { StringRef SectionName = "".data""; const MCAsmInfo *MAI = STI.getMCAsmInfo(); const MCSection *S = MAI->getNonexecutableStackSection(STI); unsigned Attributes = STI.getTargetLowering()->getStackSectionFlags(); MCContext &Context = STI.getTargetLowering()->getContext(); return Context.getELFSection(SectionName, S, Attributes, 0, ""data""); }" LLVM,RISCV,885, LLVM,RISCV,886,"bool RISCVTargetLowering::isConstantInSmallSection(const Constant 
*C, unsigned &Size, bool HasGOT) const { if (isa(C) && !HasGOT) { Size = Subtarget.is64Bit() ? 8 : 4; return true; } if (isa(C) || isa(C)) { auto *CT = cast(C->getType()); Size = (unsigned)TD->getTypeAllocSize(CT); return Subtarget.is64Bit() ? Size <= 8 : Size <= 4; } if (isa(C)) { Size = Subtarget.is64Bit() ? 8 : 4; return true; } return false; }" LLVM,RISCV,887,"TEST(isGlobalInSmallSection, TrueResult) { Module M(""test"", getGlobalContext()); Type* i32Type = IntegerType::get(getGlobalContext(), 32); GlobalVariable* gv = new GlobalVariable (M, i32Type, false, GlobalValue::ExternalLinkage, ConstantInt::get(i32Type, 42, false), ""test""); Triple TargetTriple(""riscv32-unknown-linux""); std::string Error; const Target* Target = TargetRegistry::lookupTarget("""", TargetTriple, Error); TargetOptions opt; assert(Target && ""Failed to get Target""); TargetMachine *TM = Target->createTargetMachine(""riscv32"", """", """", opt, Reloc::Default, CodeModel::Default); EXPECT_TRUE(isGlobalInSmallSection(gv, *TM)); delete TM; }" LLVM,RISCV,888,"llvm::Metadata* getModuleMetadata(llvm::Module& M) { llvm::NamedMDNode* metaNode = M.getNamedMetadata(""llvm.module.flags""); if (!metaNode) { return nullptr; } for (llvm::MDNode* op : metaNode->operands()) { llvm::MDString* idNode = llvm::dyn_cast(op->getOperand(0)->get()); if (idNode && idNode->getString() == ""RISCV"") { llvm::Metadata* targetValue = op->getOperand(1).get(); if (llvm::ConstantInt* CI = llvm::dyn_cast(targetValue)) { int64_t value = CI->getValue().getSExtValue(); if (value == 8) { llvm::Metadata* nextValue = op->getOperand(2).get(); if (llvm::MDString* nextString = llvm::dyn_cast(nextValue)) { if (nextString->getString() == ""SmallDataLimit"") { return targetValue; } } } } } } return nullptr; }" LLVM,RISCV,889,"string getSectionForConstant(SectionKind sectionKind) { string section; switch(sectionKind) { case SectionKind::RISCV: case SectionKind::RISCV: section = ""riscv_section""; break; default: section = """"; break; } return section; }" LLVM,RISCV,890, LLVM,RISCV,891,RISCVTargetLoweringInfo *RISCVTargetLoweringInfo::Initialize(TargetMachine &TM) { RTM.setForRISCV(); RISCVTargetLoweringInfo *TLI = new RISCVTargetLoweringInfo(); TLI->setTargetArch(RISCV); TLI->setTargetGlobalAddress(RISCV); return TLI; } LLVM,RISCV,892,"void emitTargetAttributes() { IRBuilder<> Builder(BasicBlock); RISCVAttrs Attrs; Attrs.add(RISCVAttrs::STACK_ALIGN, 1); Attrs.add(RISCVAttrs::ALIGN_4, 1); Attrs.add(RISCVAttrs::ALIGN_16, 1); uint64_t FeatureBits = RISCVFeatures::parseFeatureBits(Builder); if (FeatureBits & RISCV::FeatureRV32E) Attrs.add(RISCVAttrs::ARCH, ""rv32e""); else if (FeatureBits & RISCV::Feature64Bit) Attrs.add(RISCVAttrs::ARCH, ""rv64""); Builder.addAttr(""target-features"", Attrs.get()); }" LLVM,RISCV,893,"void finish(RISCV &Target) { std::set LiveRegisters; for (RISCVInstruction &I : Target.getInstructions()) { for (RISCVRegister Reg : I.getLiveRegisters()) { if (Reg.isPhysical()) { LiveRegisters.insert(Reg); } } } for (RISCVRegister Reg : LiveRegisters) { Target.createLiveRange(Reg); } for (RISCVInstruction &I : Target.getInstructions()) { RISCVOperandVector &Operands = I.getOperands(); for (unsigned I = 0, E = Operands.size(); I != E; ++I) { if (Operands[I].isRegister() && LiveRegisters.count(Operands[I].getReg())) { RISCVRegister LiveReg = Target.getLiveRange(Operands[I].getReg()); Operands[I] = RISCVOperand::createReg(LiveReg); } } } Target.setTargetSpecificValue(""RISCV""); return Target; }" LLVM,RISCV,894,"void reset(Def, Instance){ 
RISCV.reset(Def, Instance); RISCV.reset(RISCV.target_specific_value); } int main(){ RISCV riscv = new RISCV(); int Def = 10; int Instance = 1; reset(Def, Instance); std::cout << ""Scalar value reset for Def "" << Def << "" and Instance "" << Instance << std::endl; return 0; } " LLVM,RISCV,895, LLVM,RISCV,896, LLVM,RISCV,897,"void emitTargetAttributes(LLVMContext &Context, MACHInst &Inst) { if (Inst.getKind() != MACHInst::RISCV) { return; } AttributeSet Attrs = AttributeSet::get(Context, AttributeSet::FunctionIndex, ""target-features""); Attrs = Attrs.addAttribute(Context, ""target-cpu"", ""riscv""); if (RISCV::FeatureRV32E.inRange(Inst.getFeatureBits())) { Attrs = Attrs.addAttribute(Context, ""target-features"", ""rv32e""); } Attrs = Attrs.addAttribute(Context, ""target-align-stack"", ""4""); Attrs = Attrs.addAttribute(Context, ""target-align-data"", ""16""); if (RISCV::Feature64Bit.inRange(Inst.getFeatureBits())) { Attrs = Attrs.addAttribute(Context, ""target-features"", ""rv64i""); } Attrs = Attrs.addAttribute(Context, ""target-specific"", ""RISCV""); Attrs = Attrs.addAttribute(Context, ""target-specific-value"", ""RISCVAttrs::ARCH""); Inst.addFnAttr(Attrs); }" LLVM,RISCV,898," raw_ostream &OS) { OS << ""\t.arch "" << CPU << ""\n""; for (StringRef Feature : Features.split(',')) { OS << ""\t.attribute "" << Feature << ""\n""; }" LLVM,RISCV,899, LLVM,RISCV,900,"void emitTargetAttributes(LLVMContext &C, RISCVAttrs &Attrs) { const RISCVTargetMachine &TM = static_cast( C.getTargetIRAnalysis().getTargetMachine()); Module &M = const_cast(*TM.getModule()); M.addModuleFlag(Module::Warning, ""RISCV"", RISCVAttrs::STACK_ALIGN); M.addModuleFlag(Module::Warning, ""RISCV"", RISCVAttrs::ALIGN_4); M.addModuleFlag(Module::Warning, ""RISCV"", RISCVAttrs::STACK_ALIGN); M.addModuleFlag(Module::Warning, ""RISCV"", RISCVAttrs::ALIGN_16); M.addModuleFlag(Module::Warning, ""RISCV"", RISCVAttrs::ARCH); if (TM.getTargetTriple().getArch() == Triple::riscv32) { M.addModuleFlag(Module::Warning, ""RISCV"", RISCV::FeatureRV32E); M.addModuleFlag(Module::Warning, ""RISCV"", ""rv32""); } if (TM.getTargetTriple().isArch64Bit()) { M.addModuleFlag(Module::Warning, ""RISCV"", RISCV::Feature64Bit); M.addModuleFlag(Module::Warning, ""RISCV"", ""rv64""); }" LLVM,RISCV,901, LLVM,RISCV,902,"function getArithmeticReductionCost(vectorType, operationType) { cost = 0 foreach (element in vectorType) { cost = cost + element } switch (operationType) { case ADD: cost = cost * 2 break case OR: cost = cost * 3 break case XOR: cost = cost * 4 break case AND: cost = cost * 5 break case FADD: cost = cost * 6 break } return cost }" LLVM,RISCV,903,"int getIntImmCostInst(int immediate, int type, int inst, RISCV RISCV) { int cost = 0; if (immediate >= 0xffff || immediate <= -0xffff) { if (type == 1) { cost += 10; } else if (type == 0) { cost += 20; } if (inst == LOAD) { cost += 5; } if (immediate < 0) { cost += 5; }" LLVM,RISCV,904,"int getIntrinsicInstrCost(TargetMachine *TM, Intrinsic::ID ID, unsigned int Opcode, Type *RetTy, ArrayRef Args, FastMathFlags FMF, unsigned int CostKind) { if (TM->getTargetTriple().getArch() != Triple::riscv) return -1; if (ID != Intrinsic::experimental_stepvector) return -2; if (Args.size() != 2) return -3; if (CostKind != 1) return -4; Type *ArgTy = Args[0]; if (!ArgTy->isRISCVectorType()) return -5; unsigned int VecSize = (unsigned int)ArgTy->getVectorNumElements(); return VecSize * 2; }" LLVM,RISCV,905,"bool getMinMaxReductionCost(llvm::Instruction* op, llvm::Instruction::BinaryOps opcode, llvm::Type* 
elementType, llvm::Type* reductionType, int& cost) { if (target_specific_value == ""RISCV,RISCV,2,1"") { if (elementType == llvm::Type::FloatTy && reductionType == llvm::Type::FloatTy) { cost = 2; return true; } if (elementType == llvm::Type::IntTy && reductionType == llvm::Type::IntTy) { cost = 1; return true; } if (elementType->isVectorTy() && elementType->getScalarType() == llvm::Type::FloatTy && reductionType->isVectorTy()) { cost = 4; return true; } if (elementType->isVectorTy() && elementType->getScalarType() == llvm::Type::IntTy && reductionType->isVectorTy()) { cost = 3; return true; } } return false; }" LLVM,RISCV,906, LLVM,RISCV,907,unsigned getRegUsageForType(const Type *Ty) { unsigned RISCVIntRegs = 32; unsigned RISCVFPRegs = 32; unsigned RISCVRVVRegs = 0; if (Ty->isIntegerTy()) { unsigned Size = Ty->getPrimitiveSizeInBits(); unsigned RegCount = (Size + (RISCVIntRegs - 1)) / RISCVIntRegs; return RegCount; } else if (Ty->isFloatingPointTy()) { unsigned Size = Ty->getPrimitiveSizeInBits(); unsigned RegCount = (Size + (RISCVFPRegs - 1)) / RISCVFPRegs; return RegCount; } else if (Ty->isVectorTy()) { unsigned Size = Ty->getScalarSizeInBits(); unsigned RegCount = (Size + (RISCVRVVRegs - 1)) / RISCVRVVRegs; return RegCount; } return 0; } unsigned getRegUsageForType(const TargetRegisterClass *RC) { const TargetRegisterInfo *TRI = RC->TargetInstructionInfo; BitVector ContainedRegs = TRI->getAllocatableSet(*TRI->getTargetMachine()); return ContainedRegs.count(); } LLVM,RISCV,908, LLVM,RISCV,909,bool isLegalMaskedGather() { if (Target.getTargetTriple().getArch() != Triple::riscv) { return false; } if (Subtarget.hasMaskedGather()) { return true; } return false; } LLVM,RISCV,910,bool isLegalMaskedLoad(const TargetMachine &TM) { const TargetSubtargetInfo *STI = TM.getSubtargetImpl(); const RISCVSubtarget *RISCVSTI = STI ? STI->getSubtargetImpl() : nullptr; return RISCVSTI && RISCVSTI->hasMaskedLoad(); } LLVM,RISCV,911,bool isLegalMaskedScatter(const TargetMachine &TM) { return TM.getTargetTriple().getArch() == Triple::riscv; } LLVM,RISCV,912,bool isLegalMaskedStore() { const FeatureBitset& featureBits = Subtarget->getFeatureBits(); if (!featureBits[RISCV::FeatureMaskedStore]) return false; return true; } LLVM,RISCV,913,"const int getIntImmCostInst(RISCVMachineFunction *MF, Type *Ty, uint64_t Val) { if (!Ty->isIntegerTy()) { return 0; } if (Val == 0) { return 0; } return RISCV::getIntImmCost((RISCVSize)Ty->getPrimitiveSizeInBits(), Val, 0, 0, MF->getSubtarget().getRISCVIntImmCostKind()); }" LLVM,RISCV,914,int getIntImmCost(uint64_t Value) { if (Value == 0) { return TargetCostConstants::TCC_Free; } return TargetCostConstants::TCC_Basic; } LLVM,RISCV,915,uint32_t getNumberOfRegisters(uint32_t Register) { switch (Register) { case RISCV: return 32; case 1: return 1; case 32: return 0; case 31: return 31; default: return 0; } } LLVM,RISCV,916,"int getIntImmCost(RISCV::Intrinsic::ID TargetID, RISCV::RISCV TargetRISCV, uint64_t Value) { int maxCost = 0; switch(TargetID) { case RISCV::Intrinsic::ID::RISCV_LUI: maxCost = 2; break; case RISCV::Intrinsic::ID::RISCV_ADDI: maxCost = 1; break; case RISCV::Intrinsic::ID::RISCV_ADDIW: maxCost = 2; break; default: return -1; } int cost = 0; if(isInt<12>(Value) && isInt<12>(Value, false)) { cost = 1 + (TargetRISCV.hasBExtension() ? 
1 : 0); uint64_t extendedValue = SignExtend64<12>(Value); if(extendedValue != Value) { cost += 1; } } " LLVM,RISCV,917,"bool isHardwareLoopProfitable(Loop *L) { LLVMContext& Context = L->getHeader()->getModule()->getContext(); if (MDNode *MD = L->getLoopID()) { for (unsigned i = 0, ie = MD->getNumOperands(); i < ie; ++i) { const MDOperand &Op = MD->getOperand(i); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op)) { if (GV->getName().equals(""llvm.loop.set_loop_iterations"")) { unsigned Iterations = cast<ConstantInt>(GV->getInitializer()->getOperand(0)) ->getZExtValue(); if (Iterations > 0) { return getContext().getTargetInfo().isHardwareLoopsOptEnabled() && getContext() .getTargetInfo() .isHardwareLoopProfitable(getTarget(), getTargetTriple(), true , true , Iterations); } } } } } return false; }" LLVM,RISCV,918,"bool isLoweredToCall(Function* F) { Triple TargetTriple(F->getParent()->getTargetTriple()); if (TargetTriple.getArch() != Triple::riscv && TargetTriple.getVendor() == ""llvm"" && TargetTriple.getOS() == ""riscv"" && TargetTriple.getObjectFormat() == ""llvm.riscv.pulp"") { return false; } for (BasicBlock& BB : *F) { for (Instruction& I : BB) { if (CallInst* CI = dyn_cast<CallInst>(&I)) { Function* Callee = CI->getCalledFunction(); if (!Callee || Callee->isIntrinsic()) { return false; } } } } return true; }" LLVM,RISCV,919,"llvm::TargetTransformInfo::UnrollingPreferences getUnrollingPreferences() { llvm::TargetTransformInfo::UnrollingPreferences preferences; preferences.SetUsePeeling(false); preferences.Partial = preferences.RuntimeOptimization = false; preferences.Threshold = 60; preferences.PercentForLoopUnrolling = 0; preferences.DefaultUnrollRuntimeCount = 4; preferences.DefaultFullUnrollRuntimeCount = 12; preferences.AggressiveThresholdPercent = 0; preferences.MinPercentForWithAndIfBranchProbability = 0; preferences.LoopCharacteristics.push_back( llvm::TargetTransformInfo::TCK_InstructionLatency); preferences.LoopCharacteristics.push_back( llvm::TargetTransformInfo::TCK_CodeSize); preferences.InstructionCost = 2; preferences.LoopsInterleaveCount = 0; preferences.InterleaveCount = 0; preferences.Reciprocals = 0; preferences.VectorizationFactor = 4; preferences.SetHardwareLoopInfo( llvm::TargetTransformInfo::HardwareLoopInfo(2, true, false, false)); preferences.AdditionalInfo.setVectorization(); return preferences; }" LLVM,RISCV,920,int getIntImmCost(int64_t Value){ int Cost = 0; if(isInt<12>(Value)){ Cost = 0; } else{ int64_t HighBits = Value >> 32; if(isInt<12>(HighBits)){ Cost += 1; } else{ Cost += RISCVMatInt::getIntMatCost(Instruction::Load); } int64_t LowBits = Value & 0xFFFFFFFF; if(isInt<12>(LowBits)){ Cost += 1; } else{ Cost += RISCVMatInt::getIntMatCost(Instruction::Load); } } return Cost; } LLVM,RISCV,921,"define i32 @getIntImmCostInst(i32 %imm) { entry: %imm_cost = call i32 @llvm.riscv.getIntImmCost(i32 %imm, i32 0, i32 0, i1 false, i1 true) ret i32 %imm_cost }" LLVM,RISCV,922,"bool isLoweredToCall(const Function *F) { std::string target = Triple(F->getParent()->getTargetTriple()).getArch(); if (target == ""riscv"") { StringRef FName = F->getName(); if (FName.find(""llvm.riscv.cv"") != StringRef::npos) { return false; } return true; } return false; }" LLVM,RISCV,923, Type *Ty) { if (!Subtarget->isRISCV()) return 0; if (Ty->getIntegerBitWidth() != 64) return 0; unsigned NumBits = Val.getActiveBits() - 64; return 1 << NumBits; } LLVM,RISCV,924,"unsigned int getRegUsageForType(Type *Ty, RISCV::RVVBitsPerBlock) { ...
} ``` The function name is getRegUsageForType, the return type is unsigned int, and the two parameters are Ty and RISCV::RVVBitsPerBlock. Step 2: Initialize the Register Count The function will calculate the estimated number of registers required to represent the given type. To start, we will initialize a variable named RegCount to 0 as follows: ``` unsigned int getRegUsageForType(Type *Ty, RISCV::RVVBitsPerBlock) { unsigned int RegCount = 0; ... } ``` Step 3: Check if the given type is a vector type The RISC-V Vector Extension (RVV) supports vector data types that have a fixed number of elements. Therefore, the first step in calculating the register usage for a type is to check if it is a vector type. ``` unsigned int getRegUsageForType(Type *Ty, RISCV::RVVBitsPerBlock) { unsigned int RegCount = 0; if (Ty->isVectorTy()) { ... } return RegCount; } ``` Step 4: Get the Vector Element Type If the given type is a vector type, we need to calculate the register usage for the vector element type. We can use the function getVectorElementType to get the element type of the vector type as follows: ``` if (Ty->isVectorTy()) { Type *ElemTy = dyn_cast(Ty)->getElementType(); ... } ``` Step 5: Calculate the Number of Elements To get the estimated number of registers required for the vector type, we need to calculate the number of elements in the vector. We can use the getVectorNumElements function to get this value as follows: ``` if (Ty->isVectorTy()) { Type *ElemTy = dyn_cast(Ty)->getElementType(); unsigned int NumElements = dyn_cast(Ty)->getNumElements(); ... }" LLVM,RISCV,925,"llvm::UnrollingPreferences getUnrollingPreferences() { llvm::UnrollingPreferences preferences; preferences.Opcode = RISCV::INSTS; preferences.ConsecutiveDepCheck = true; preferences.MaxLoopSize = 0; preferences.Partial = 0; preferences.RuntimeChunkSize = 4; preferences.AllowPartial = 0; preferences.Prefix = ""Loop has:\n""; preferences.CountS = ""Blocks: ""; preferences.BodyS = ""\n""; preferences.ExitS = ""Exit blocks: ""; preferences.Width = 2; preferences.UnrollStyle = 4; preferences.VectorizeFlag = ""llvm.loop.isvectorized""; preferences.VectorizeWidth = 0; preferences.VectorizeCost = ""Cost of loop: ""; preferences.PrimitiveCost = 60; preferences.DuplicateFactor = 12; return preferences; }" LLVM,RISCV,926, LLVM,RISCV,927, LLVM,RISCV,928,"unsigned int getIntImmCost(const RISCVInstrInfo& InstInfo, unsigned int Opcode, int64_t Value, unsigned int Idx, const RISCVSubtarget&) { if (Opcode == RISCV::ADDI || Opcode == RISCV::ADDI64) { return 1; } return 0; }" LLVM,RISCV,929,"function getGatherScatterOpCost(Target, DstTy, NumIndices) { if (Target != RISCV) { return 0; } Size = getTypeSizeInBytes(DstTy); Cost = Size * NumIndices; return Cost; }" LLVM,RISCV,930,"int RISCVTargetLowering::getIntImmCost(const APInt& Imm, Type *Ty, TLIContext &Ctx) const { return 64; }" LLVM,RISCV,931,"int getIntImmCostInst(uint8_t OpType, uint64_t Immediate) { int Cost = 0; if (OpType != RISCV) return Cost; if (Immediate > 0xFFFFFFFF) return Cost; if (Immediate <= 0xFF) { Cost = 1; } else if (Immediate <= 0xFFFF) { Cost = 2; } else if (Immediate <= 0xFFFFFF) { Cost = 3; } else if (Immediate <= 0xFFFFFFFF) { Cost = 4; } return Cost; }" LLVM,RISCV,932, LLVM,RISCV,933,llvm::TargetLowering *getTLI(llvm::RISCVModule &Input) { return (new llvm::RISCVTargetLowering(Input.getInfo())).getTLI(); } GCC,arc,418,"void workaround_arc_anomaly() { set_target(""arc""); arc_hazard(); compute_branch_shortening(NOP_SIZE); set_target(""default""); }" GCC,arc,419,